vegas-6.4.1-cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vegas/_vegas.pyx ADDED
@@ -0,0 +1,3473 @@
1
+ # cython: language_level=3str, binding=True, boundscheck=False
2
+ # c#ython: profile=True
3
+
4
+ # Created by G. Peter Lepage (Cornell University) in 12/2013.
5
+ # Copyright (c) 2013-26 G. Peter Lepage.
6
+ #
7
+ # This program is free software: you can redistribute it and/or modify
8
+ # it under the terms of the GNU General Public License as published by
9
+ # the Free Software Foundation, either version 3 of the License, or
10
+ # any later version (see <http://www.gnu.org/licenses/>).
11
+ #
12
+ # This program is distributed in the hope that it will be useful,
13
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
14
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15
+ # GNU General Public License for more details.
16
+
17
+ from libc.math cimport floor, log, abs, tanh, erf, exp, sqrt
18
+
19
+ import collections
20
+ import copy
21
+ import inspect
22
+ import math
23
+ import multiprocessing
24
+ import pickle
25
+ import os
26
+ import sys
27
+ import tempfile
28
+ import time
29
+ import warnings
30
+
31
+ import numpy
32
+ import gvar
33
+
34
+ cdef double TINY = 10 ** (sys.float_info.min_10_exp + 50) # smallest and biggest
35
+ cdef double HUGE = 10 ** (sys.float_info.max_10_exp - 50) # with extra headroom
36
+ cdef double EPSILON = sys.float_info.epsilon * 1e4 # roundoff error threshold (see Schubert and Gertz Table 2)
37
+
38
+ # AdaptiveMap is used by Integrator
39
+ cdef class AdaptiveMap:
40
+ r""" Adaptive map ``y->x(y)`` for multidimensional ``y`` and ``x``.
41
+
42
+ An :class:`AdaptiveMap` defines a multidimensional map ``y -> x(y)``
43
+ from the unit hypercube, with ``0 <= y[d] <= 1``, to an arbitrary
44
+ hypercube in ``x`` space. Each direction is mapped independently
45
+ with a Jacobian that is tunable (i.e., "adaptive").
46
+
47
+ The map is specified by a grid in ``x``-space that, by definition,
48
+ maps into a uniformly spaced grid in ``y``-space. The nodes of
49
+ the grid are specified by ``grid[d, i]`` where d is the
50
+ direction (``d=0,1...dim-1``) and ``i`` labels the grid point
51
+ (``i=0,1...N``). The mapping for a specific point ``y`` into
52
+ ``x`` space is::
53
+
54
+ y[d] -> x[d] = grid[d, i(y[d])] + inc[d, i(y[d])] * delta(y[d])
55
+
56
+ where ``i(y)=floor(y*N)``, ``delta(y)=y*N - i(y)``, and
57
+ ``inc[d, i] = grid[d, i+1] - grid[d, i]``. The Jacobian for this map, ::
58
+
59
+ dx[d]/dy[d] = inc[d, i(y[d])] * N,
60
+
61
+ is piece-wise constant and proportional to the ``x``-space grid
62
+ spacing. Each increment in the ``x``-space grid maps into an increment of
63
+ size ``1/N`` in the corresponding ``y`` space. So regions in
64
+ ``x`` space where ``inc[d, i]`` is small are stretched out
65
+ in ``y`` space, while larger increments are compressed.
66
+
67
+ The ``x`` grid for an :class:`AdaptiveMap` can be specified explicitly
68
+ when the map is created: for example, ::
69
+
70
+ m = AdaptiveMap([[0, 0.1, 1], [-1, 0, 1]])
71
+
72
+ creates a two-dimensional map where the ``x[0]`` interval ``(0,0.1)``
73
+ and ``(0.1,1)`` map into the ``y[0]`` intervals ``(0,0.5)`` and
74
+ ``(0.5,1)`` respectively, while ``x[1]`` intervals ``(-1,0)``
75
+ and ``(0,1)`` map into ``y[1]`` intervals ``(0,0.5)`` and ``(0.5,1)``.
76
+
77
+ More typically, a uniform map with ``ninc`` increments
78
+ is first created: for example, ::
79
+
80
+ m = AdaptiveMap([[0, 1], [-1, 1]], ninc=1000)
81
+
82
+ creates a two-dimensional grid, with 1000 increments in each direction,
83
+ that spans the volume ``0<=x[0]<=1``, ``-1<=x[1]<=1``. This map is then
84
+ trained with data ``f[j]`` corresponding to ``ny`` points ``y[j, d]``,
85
+ with ``j=0...ny-1``, (usually) uniformly distributed in |y| space:
86
+ for example, ::
87
+
88
+ m.add_training_data(y, f)
89
+ m.adapt(alpha=1.5)
90
+
91
+ ``m.adapt(alpha=1.5)`` shrinks grid increments where ``f[j]``
92
+ is large, and expands them where ``f[j]`` is small. Usually
93
+ one has to iterate over several sets of ``y``\s and ``f``\s
94
+ before the grid has fully adapted.
95
+
96
+ The speed with which the grid adapts is determined by parameter ``alpha``.
97
+ Large (positive) values imply rapid adaptation, while small values (much
98
+ less than one) imply slow adaptation. As in any iterative process that
99
+ involves random numbers, it is usually a good idea to slow adaptation
100
+ down in order to avoid instabilities caused by random fluctuations.
101
+
102
+ Args:
103
+ grid (list of arrays): Initial ``x`` grid, where ``grid[d][i]``
104
+ is the ``i``-th node in direction ``d``. Different directions
105
+ can have different numbers of nodes.
106
+ ninc (int or array or ``None``): ``ninc[d]`` (or ``ninc``, if it
107
+ is a number) is the number of increments along direction ``d``
108
+ in the new ``x`` grid. The new grid is designed to give the same
109
+ Jacobian ``dx(y)/dy`` as the original grid. The default value,
110
+ ``ninc=None``, leaves the grid unchanged.
111
+ """
112
+ def __init__(self, grid, ninc=None):
113
+ cdef Py_ssize_t i, d, dim
114
+ cdef double griddi
115
+ if isinstance(grid, AdaptiveMap):
116
+ self.ninc = numpy.array(grid.ninc)
117
+ self.inc = numpy.array(grid.inc)
118
+ self.grid = numpy.array(grid.grid)
119
+ else:
120
+ dim = len(grid)
121
+ len_g = numpy.array([len(x) for x in grid], dtype=numpy.intp)
122
+ if min(len_g) < 2:
123
+ raise ValueError('grid[d] must have at least 2 elements, not {}'.format(min(len_g)))
124
+ self.ninc = len_g - 1
125
+ self.inc = numpy.empty((dim, max(len_g)-1), float)
126
+ self.grid = numpy.empty((dim, self.inc.shape[1] +1), float)
127
+ for d in range(dim):
128
+ for i, griddi in enumerate(sorted(grid[d])):
129
+ self.grid[d, i] = griddi
130
+ for i in range(len_g[d] - 1):
131
+ self.inc[d, i] = self.grid[d, i + 1] - self.grid[d, i]
132
+ self.clear()
133
+ if ninc is not None and not numpy.all(ninc == self.ninc):
134
+ if numpy.all(numpy.asarray(self.ninc) == 1):
135
+ self.make_uniform(ninc=ninc)
136
+ else:
137
+ self.adapt(ninc=ninc)
138
+
139
+ property dim:
140
+ " Number of dimensions."
141
+ def __get__(self):
142
+ return self.grid.shape[0]
143
+
144
+ def region(self, Py_ssize_t d=-1):
145
+ r""" x-space region.
146
+
147
+ ``region(d)`` returns a tuple ``(xl,xu)`` specifying the ``x``-space
148
+ interval covered by the map in direction ``d``. A list containing
149
+ the intervals for each direction is returned if ``d`` is omitted.
150
+ """
151
+ if d < 0:
152
+ return [self.region(d) for d in range(self.dim)]
153
+ else:
154
+ return (self.grid[d, 0], self.grid[d, self.ninc[d]])
155
+
156
+ def extract_grid(self):
157
+ " Return a list of lists specifying the map's grid. "
158
+ cdef Py_ssize_t d
159
+ grid = []
160
+ for d in range(self.dim):
161
+ ng = self.ninc[d] + 1
162
+ grid.append(list(self.grid[d, :ng]))
163
+ return grid
164
+
165
+ def __reduce__(self):
166
+ r""" Capture state for pickling. """
167
+ return (AdaptiveMap, (self.extract_grid(),))
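Because ``__reduce__`` captures only the grid nodes, pickling preserves the map itself but not any accumulated training data; a quick sketch, assuming ``m`` is an :class:`AdaptiveMap`: ::

    import pickle

    m2 = pickle.loads(pickle.dumps(m))
    assert m2.extract_grid() == m.extract_grid()  # grid round-trips
    # training data accumulated via add_training_data() is not preserved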
168
+
169
+ def settings(self, ngrid=5):
170
+ r""" Create string with information about grid nodes.
171
+
172
+ Creates a string containing the locations of the nodes
173
+ in the map grid for each direction. Parameter
174
+ ``ngrid`` specifies the maximum number of nodes to print
175
+ (spread evenly over the grid).
176
+ """
177
+ cdef Py_ssize_t d
178
+ ans = []
179
+ if ngrid > 0:
180
+ for d in range(self.dim):
181
+ grid_d = numpy.array(self.grid[d, :self.ninc[d] + 1])
182
+ nskip = int(self.ninc[d] // ngrid)
183
+ if nskip<1:
184
+ nskip = 1
185
+ start = nskip // 2
186
+ ans += [
187
+ " grid[%2d] = %s"
188
+ % (
189
+ d,
190
+ numpy.array2string(
191
+ grid_d[start::nskip], precision=3,
192
+ prefix=' grid[xx] = ')
193
+ )
194
+ ]
195
+ return '\n'.join(ans) + '\n'
196
+
197
+ def random(self, n=None):
198
+ " Create ``n`` random points in |y| space. "
199
+ if n is None:
200
+ y = gvar.RNG.random(self.dim)
201
+ else:
202
+ y = gvar.RNG.random((n, self.dim))
203
+ return self(y)
204
+
205
+ def make_uniform(self, ninc=None):
206
+ r""" Replace the grid with a uniform grid.
207
+
208
+ The new grid has ``ninc[d]`` (or ``ninc``, if it is a number)
209
+ increments along each direction if ``ninc`` is specified.
210
+ If ``ninc=None`` (default), the new grid has the same number
211
+ of increments in each direction as the old grid.
212
+ """
213
+ cdef Py_ssize_t i, d
214
+ cdef Py_ssize_t dim = self.grid.shape[0]
215
+ cdef double[:] tmp
216
+ cdef double[:, ::1] new_grid
217
+ if ninc is None:
218
+ ninc = numpy.asarray(self.ninc)
219
+ elif numpy.shape(ninc) == ():
220
+ ninc = numpy.full(self.dim, int(ninc), dtype=numpy.intp)
221
+ elif numpy.shape(ninc) == (self.dim,):
222
+ ninc = numpy.asarray(ninc)
223
+ else:
224
+ raise ValueError('ninc has wrong shape -- {}'.format(numpy.shape(ninc)))
225
+ if min(ninc) < 1:
226
+ raise ValueError(
227
+ "no of increments < 1 in AdaptiveMap -- %s"
228
+ % str(ninc)
229
+ )
230
+ new_inc = numpy.empty((dim, max(ninc)), float)
231
+ new_grid = numpy.empty((dim, new_inc.shape[1] + 1), float)
232
+ for d in range(dim):
233
+ tmp = numpy.linspace(self.grid[d, 0], self.grid[d, self.ninc[d]], ninc[d] + 1)
234
+ for i in range(ninc[d] + 1):
235
+ new_grid[d, i] = tmp[i]
236
+ for i in range(ninc[d]):
237
+ new_inc[d, i] = new_grid[d, i + 1] - new_grid[d, i]
238
+ self.ninc = ninc
239
+ self.grid = new_grid
240
+ self.inc = new_inc
241
+ self.clear()
242
+
243
+ def __call__(self, y=None):
244
+ r""" Return ``x`` values corresponding to ``y``.
245
+
246
+ ``y`` can be a single ``dim``-dimensional point, or it
247
+ can be an array ``y[i,j, ..., d]`` of such points (``d=0..dim-1``).
248
+
249
+ If ``y=None`` (default), ``y`` is set equal to a (uniform) random point
250
+ in the volume.
251
+ """
252
+
253
+ if y is None:
254
+ y = gvar.RNG.random(size=self.dim)
255
+ else:
256
+ y = numpy.asarray(y, float)
257
+ y_shape = y.shape
258
+ y.shape = -1, y.shape[-1]
259
+ x = 0 * y
260
+ jac = numpy.empty(y.shape[0], float)
261
+ self.map(y, x, jac)
262
+ x.shape = y_shape
263
+ return x
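As the docstring notes, ``y`` may carry any leading shape; a sketch, assuming ``m`` is an :class:`AdaptiveMap`: ::

    import numpy

    shape = (3, 4, m.dim)               # batch of 3x4 points
    x = m(numpy.random.random(shape))   # map them into x space
    assert x.shape == shape             # output shape matches input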
264
+
265
+ def jac1d(self, y):
266
+ r""" Return the map's Jacobian at ``y`` for each direction.
267
+
268
+ ``y`` can be a single ``dim``-dimensional point, or it
269
+ can be an array ``y[i,j,...,d]`` of such points (``d=0..dim-1``).
270
+ Returns an array ``jac`` where ``jac[i,j,...,d]`` is the
271
+ (one-dimensional) Jacobian (``dx[d]/dy[d]``) corresponding
272
+ to ``y[i,j,...,d]``.
273
+ """
274
+ cdef Py_ssize_t dim = self.grid.shape[0]
275
+ cdef Py_ssize_t i, d, ninc, ny, iy
276
+ cdef double y_ninc, dy_ninc
277
+ cdef double[:,::1] jac
278
+ y = numpy.asarray(y)
279
+ y_shape = y.shape
280
+ y.shape = -1, y.shape[-1]
281
+ ny = y.shape[0]
282
+ jac = numpy.empty(y.shape, float)
283
+ for i in range(ny):
284
+ for d in range(dim):
285
+ ninc = self.ninc[d]
286
+ y_ninc = y[i, d] * ninc
287
+ iy = <int>floor(y_ninc)
288
+ dy_ninc = y_ninc - iy
289
+ if iy < ninc:
290
+ jac[i, d] = self.inc[d, iy] * ninc
291
+ else:
292
+ jac[i, d] = self.inc[d, ninc - 1] * ninc
293
+ ans = numpy.asarray(jac)
294
+ ans.shape = y_shape
295
+ return ans
296
+
297
+ def jac(self, y):
298
+ r""" Return the map's Jacobian at ``y``.
299
+
300
+ ``y`` can be a single ``dim``-dimensional point, or it
301
+ can be an array ``y[i,j,...,d]`` of such points (``d=0..dim-1``).
302
+ Returns an array ``jac`` where ``jac[i,j,...]`` is the
303
+ (multidimensional) Jacobian (``dx/dy``) corresponding
304
+ to ``y[i,j,...]``.
305
+ """
306
+ return numpy.prod(self.jac1d(y), axis=-1)
307
+
308
+ # @cython.boundscheck(False)
309
+ # @cython.wraparound(False)
310
+ cpdef map(
311
+ self,
312
+ double[:, ::1] y,
313
+ double[:, ::1] x,
314
+ double[::1] jac,
315
+ Py_ssize_t ny=-1
316
+ ):
317
+ r""" Map y to x, where jac is the Jacobian (``dx/dy``).
318
+
319
+ ``y[j, d]`` is an array of ``ny`` ``y``-values for direction ``d``.
320
+ ``x[j, d]`` is filled with the corresponding ``x`` values,
321
+ and ``jac[j]`` is filled with the corresponding Jacobian
322
+ values. ``x`` and ``jac`` must be preallocated: for example, ::
323
+
324
+ x = numpy.empty(y.shape, float)
325
+ jac = numpy.empty(y.shape[0], float)
326
+
327
+ Args:
328
+ y (array): ``y`` values to be mapped. ``y`` is a contiguous
329
+ 2-d array, where ``y[j, d]`` contains values for points
330
+ along direction ``d``.
331
+ x (array): Container for ``x[j, d]`` values corresponding
332
+ to ``y[j, d]``. Must be a contiguous 2-d array.
333
+ jac (array): Container for Jacobian values ``jac[j]`` (``= dx/dy``)
334
+ corresponding to ``y[j, d]``. Must be a contiguous 1-d array.
335
+ ny (int): Number of ``y`` points: ``y[j, d]`` for ``d=0...dim-1``
336
+ and ``j=0...ny-1``. ``ny`` is set to ``y.shape[0]`` if it is
337
+ omitted (or negative).
338
+ """
339
+ cdef Py_ssize_t ninc
340
+ cdef Py_ssize_t dim = self.inc.shape[0]
341
+ cdef Py_ssize_t i, iy, d
342
+ cdef double y_ninc, dy_ninc, tmp_jac
343
+ if ny < 0:
344
+ ny = y.shape[0]
345
+ elif ny > y.shape[0]:
346
+ raise ValueError('ny > y.shape[0]: %d > %d' % (ny, y.shape[0]))
347
+ for i in range(ny):
348
+ jac[i] = 1.
349
+ for d in range(dim):
350
+ ninc = self.ninc[d]
351
+ y_ninc = y[i, d] * ninc
352
+ iy = <int>floor(y_ninc)
353
+ dy_ninc = y_ninc - iy
354
+ if iy < ninc:
355
+ x[i, d] = self.grid[d, iy] + self.inc[d, iy] * dy_ninc
356
+ jac[i] *= self.inc[d, iy] * ninc
357
+ else:
358
+ x[i, d] = self.grid[d, ninc]
359
+ jac[i] *= self.inc[d, ninc - 1] * ninc
360
+ return
361
+
362
+ cpdef invmap(
363
+ self,
364
+ double[:, ::1] x,
365
+ double[:, ::1] y,
366
+ double[::1] jac,
367
+ Py_ssize_t nx=-1
368
+ ):
369
+ r""" Map x to y, where jac is the Jacobian (``dx/dy``).
370
+
371
+ ``y[j, d]`` is an array of ``ny`` ``y``-values for direction ``d``.
372
+ ``x[j, d]`` is filled with the corresponding ``x`` values,
373
+ and ``jac[j]`` is filled with the corresponding Jacobian
374
+ values. ``x`` and ``jac`` must be preallocated: for example, ::
375
+
376
+ x = numpy.empty(y.shape, float)
377
+ jac = numpy.empty(y.shape[0], float)
378
+
379
+ Args:
380
+ x (array): ``x`` values to be mapped to ``y``-space. ``x``
381
+ is a contiguous 2-d array, where ``x[j, d]`` contains
382
+ values for points along direction ``d``.
383
+ y (array): Container for ``y[j, d]`` values corresponding
384
+ to ``x[j, d]``. Must be a contiguous 2-d array
385
+ jac (array): Container for Jacobian values ``jac[j]`` (``= dx/dy``)
386
+ corresponding to ``y[j, d]``. Must be a contiguous 1-d array
387
+ nx (int): Number of ``x`` points: ``x[j, d]`` for ``d=0...dim-1``
388
+ and ``j=0...nx-1``. ``nx`` is set to ``x.shape[0]`` if it is
389
+ omitted (or negative).
390
+ """
391
+ cdef Py_ssize_t ninc
392
+ cdef Py_ssize_t dim = self.inc.shape[0]
393
+ cdef Py_ssize_t[:] iy
394
+ cdef Py_ssize_t i, iyi, d
395
+ cdef double y_ninc, dy_ninc, tmp_jac
396
+ if nx < 0:
397
+ nx = x.shape[0]
398
+ elif nx > x.shape[0]:
399
+ raise ValueError('nx > x.shape[0]: %d > %d' % (nx, x.shape[0]))
400
+ for i in range(nx):
401
+ jac[i] = 1.
402
+ for d in range(dim):
403
+ ninc = self.ninc[d]
404
+ iy = numpy.searchsorted(self.grid[d, :], x[:, d], side='right')
405
+ for i in range(nx):
406
+ if iy[i] > 0 and iy[i] <= ninc:
407
+ iyi = iy[i] - 1
408
+ y[i, d] = (iyi + (x[i, d] - self.grid[d, iyi]) / self.inc[d, iyi]) / ninc
409
+ jac[i] *= self.inc[d, iyi] * ninc
410
+ elif iy[i] <= 0:
411
+ y[i, d] = 0.
412
+ jac[i] *= self.inc[d, 0] * ninc
413
+ elif iy[i] > ninc:
414
+ y[i, d] = 1.0
415
+ jac[i] *= self.inc[d, ninc - 1] * ninc
416
+ return
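``invmap`` inverts ``map``, so mapping ``x`` to ``y`` and back should reproduce ``x`` up to roundoff; a consistency sketch, assuming ``m`` is an :class:`AdaptiveMap` and ``x`` a contiguous ``(n, m.dim)`` array of points inside ``m.region()``: ::

    import numpy

    y = numpy.empty(x.shape, float)
    jac = numpy.empty(x.shape[0], float)
    m.invmap(x, y, jac)            # x -> y
    x2 = numpy.empty(x.shape, float)
    m.map(y, x2, jac)              # y -> x again
    assert numpy.allclose(x, x2)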
417
+
418
+
419
+ # @cython.boundscheck(False)
420
+ # @cython.wraparound(False)
421
+ cpdef add_training_data(
422
+ self,
423
+ double[:, ::1] y,
424
+ double[::1] f,
425
+ Py_ssize_t ny=-1,
426
+ ):
427
+ r""" Add training data ``f`` for ``y``-space points ``y``.
428
+
429
+ Accumulates training data for later use by ``self.adapt()``.
430
+ Grid increments will be made smaller in regions where
431
+ ``f`` is larger than average, and larger where ``f``
432
+ is smaller than average. The grid is unchanged (i.e., converged)
433
+ when ``f`` is constant across the grid.
434
+
435
+ Args:
436
+ y (array): ``y`` values corresponding to the training data.
437
+ ``y`` is a contiguous 2-d array, where ``y[j, d]``
438
+ is for points along direction ``d``.
439
+ f (array): Training function values. ``f[j]`` corresponds to
440
+ point ``y[j, d]`` in ``y``-space.
441
+ ny (int): Number of ``y`` points: ``y[j, d]`` for ``d=0...dim-1``
442
+ and ``j=0...ny-1``. ``ny`` is set to ``y.shape[0]`` if it is
443
+ omitted (or negative).
444
+ """
445
+ cdef Py_ssize_t ninc
446
+ cdef Py_ssize_t dim = self.inc.shape[0]
447
+ cdef Py_ssize_t iy
448
+ cdef Py_ssize_t i, d
449
+ if self.sum_f is None:
450
+ shape = (self.inc.shape[0], self.inc.shape[1])
451
+ self.sum_f = numpy.zeros(shape, float)
452
+ self.n_f = numpy.zeros(shape, float) + TINY
453
+ if ny < 0:
454
+ ny = y.shape[0]
455
+ elif ny > y.shape[0]:
456
+ raise ValueError('ny > y.shape[0]: %d > %d' % (ny, y.shape[0]))
457
+ for d in range(dim):
458
+ ninc = self.ninc[d]
459
+ for i in range(ny):
460
+ if y[i, d] > 0 and y[i, d] < 1:
461
+ iy = <int> floor(y[i, d] * ninc)
462
+ self.sum_f[d, iy] += abs(f[i])
463
+ self.n_f[d, iy] += 1
464
+ return
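The accumulation above is simple binning in ``y`` space: ``iy = floor(y * ninc)`` selects the increment and ``|f|`` is added to its running sum. For example: ::

    from math import floor

    ninc = 4
    iy = floor(0.63 * ninc)   # -> 2, so |f| is added to sum_f[d, 2]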
465
+
466
+ # @cython.boundscheck(False)
467
+ def adapt(self, double alpha=0.0, ninc=None):
468
+ r""" Adapt grid to accumulated training data.
469
+
470
+ ``self.adapt(...)`` projects the training data onto
471
+ each axis independently and maps it into ``x`` space.
472
+ It shrinks ``x``-grid increments in regions where the
473
+ projected training data is large, and grows increments
474
+ where the projected data is small. The grid along
475
+ any direction is unchanged if the training data
476
+ is constant along that direction.
477
+
478
+ The number of increments along a direction can be
479
+ changed by setting parameter ``ninc`` (array or number).
480
+
481
+ The grid does not change if no training data has
482
+ been accumulated, unless ``ninc`` is specified, in
483
+ which case the number of increments is adjusted
484
+ while preserving the relative density of increments
485
+ at different values of ``x``.
486
+
487
+ Args:
488
+ alpha (double): Determines the speed with which the grid
489
+ adapts to training data. Large (positive) values imply
490
+ rapid evolution; small values (much less than one) imply
491
+ slow evolution. Typical values are of order one. Choosing
492
+ ``alpha<0`` causes adaptation to the unmodified training
493
+ data (usually not a good idea).
494
+ ninc (int or array or None): The number of increments in the new
495
+ grid is ``ninc[d]`` (or ``ninc``, if it is a number)
496
+ in direction ``d``. The number is unchanged from the
497
+ old grid if ``ninc`` is omitted (or equals ``None``,
498
+ which is the default).
499
+ """
500
+ cdef double[:, ::1] new_grid
501
+ cdef double[::1] avg_f, tmp_f
502
+ cdef double sum_f, acc_f, f_ninc
503
+ cdef Py_ssize_t old_ninc
504
+ cdef Py_ssize_t dim = self.grid.shape[0]
505
+ cdef Py_ssize_t i, j
506
+ cdef Py_ssize_t[:] new_ninc
507
+
508
+ # initialization
509
+ if ninc is None:
510
+ new_ninc = numpy.array(self.ninc)
511
+ elif numpy.shape(ninc) == ():
512
+ new_ninc = numpy.full(dim, int(ninc), numpy.intp)
513
+ elif len(ninc) == dim:
514
+ new_ninc = numpy.array(ninc, numpy.intp)
515
+ else:
516
+ raise ValueError('badly formed ninc = ' + str(ninc))
517
+ if min(new_ninc) < 1:
518
+ raise ValueError('ninc < 1: ' + str(list(new_ninc)))
519
+ if max(new_ninc) == 1:
520
+ new_grid = numpy.empty((dim, 2), float)
521
+ for d in range(dim):
522
+ new_grid[d, 0] = self.grid[d, 0]
523
+ new_grid[d, 1] = self.grid[d, self.ninc[d]]
524
+ self.grid = numpy.asarray(new_grid)
525
+ self.inc = numpy.empty((dim, 1), float)
526
+ self.ninc = numpy.array(dim * [1], dtype=numpy.intp)
527
+ for d in range(dim):
528
+ self.inc[d, 0] = self.grid[d, 1] - self.grid[d, 0]
529
+ self.clear()
530
+ return
531
+
532
+ # smooth and regrid
533
+ new_grid = numpy.empty((dim, max(new_ninc) + 1), float)
534
+ avg_f = numpy.ones(self.inc.shape[1], float) # default = uniform
535
+ if alpha > 0 and max(self.ninc) > 1:
536
+ tmp_f = numpy.empty(self.inc.shape[1], float)
537
+ for d in range(dim):
538
+ old_ninc = self.ninc[d]
539
+ if alpha != 0 and old_ninc > 1:
540
+ if self.sum_f is not None:
541
+ for i in range(old_ninc):
542
+ if self.n_f[d, i] > 0:
543
+ avg_f[i] = self.sum_f[d, i] / self.n_f[d, i]
544
+ else:
545
+ avg_f[i] = 0.
546
+ if alpha > 0:
547
+ # smooth
548
+ tmp_f[0] = abs(7. * avg_f[0] + avg_f[1]) / 8.
549
+ tmp_f[old_ninc - 1] = abs(7. * avg_f[old_ninc - 1] + avg_f[old_ninc - 2]) / 8.
550
+ sum_f = tmp_f[0] + tmp_f[old_ninc - 1]
551
+ for i in range(1, old_ninc - 1):
552
+ tmp_f[i] = abs(6. * avg_f[i] + avg_f[i-1] + avg_f[i+1]) / 8.
553
+ sum_f += tmp_f[i]
554
+ if sum_f > 0:
555
+ for i in range(old_ninc):
556
+ avg_f[i] = tmp_f[i] / sum_f + TINY
557
+ else:
558
+ for i in range(old_ninc):
559
+ avg_f[i] = TINY
560
+ for i in range(old_ninc):
561
+ if avg_f[i] > 0 and avg_f[i] <= 0.99999999:
562
+ avg_f[i] = (-(1 - avg_f[i]) / log(avg_f[i])) ** alpha
563
+ # regrid
564
+ new_grid[d, 0] = self.grid[d, 0]
565
+ new_grid[d, new_ninc[d]] = self.grid[d, old_ninc]
566
+ i = 0 # new_x index
567
+ j = -1 # self_x index
568
+ acc_f = 0 # sum(avg_f) accumulated
569
+ f_ninc = 0.
570
+ for i in range(old_ninc):
571
+ f_ninc += avg_f[i]
572
+ f_ninc /= new_ninc[d] # amount of acc_f per new increment
573
+ for i in range(1, new_ninc[d]):
574
+ while acc_f < f_ninc:
575
+ j += 1
576
+ if j < old_ninc:
577
+ acc_f += avg_f[j]
578
+ else:
579
+ break
580
+ else:
581
+ acc_f -= f_ninc
582
+ new_grid[d, i] = (
583
+ self.grid[d, j+1]
584
+ - (acc_f / avg_f[j]) * self.inc[d, j]
585
+ )
586
+ continue
587
+ break
588
+ self.grid = numpy.asarray(new_grid)
589
+ self.inc = numpy.empty((dim, self.grid.shape[1] - 1), float)
590
+ for d in range(dim):
591
+ for i in range(new_ninc[d]):
592
+ self.inc[d, i] = self.grid[d, i + 1] - self.grid[d, i]
593
+ self.ninc = numpy.asarray(new_ninc)
594
+ self.clear()
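The damping step above passes each smoothed, normalized projection ``r`` through ``(-(1 - r)/log(r)) ** alpha``, which raises small values far more than large ones and so keeps a few noisy samples from collapsing an increment; a numerical sketch (outputs approximate): ::

    from math import log

    def damp(r, alpha=1.5):
        return (-(1. - r) / log(r)) ** alpha

    print(damp(1e-4))   # ~0.036 : tiny projections are boosted
    print(damp(0.9))    # ~0.92  : large projections barely change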
595
+
596
+ def clear(self):
597
+ " Clear information accumulated by :meth:`AdaptiveMap.add_training_data`. "
598
+ self.sum_f = None
599
+ self.n_f = None
600
+
601
+ def show_grid(self, ngrid=40, axes=None, shrink=False, plotter=None):
602
+ r""" Display plots showing the current grid.
603
+
604
+ Args:
605
+ ngrid (int): The number of grid nodes in each
606
+ direction to include in the plot. The default is 40.
607
+ axes: List of pairs of directions to use in
608
+ different views of the grid. Using ``None`` in
609
+ place of a direction plots the grid for only one
610
+ direction. Omitting ``axes`` causes a default
611
+ set of pairings to be used.
612
+ shrink: Display entire range of each axis
613
+ if ``False``; otherwise shrink range to include
614
+ just the nodes being displayed. The default is
615
+ ``False``.
616
+ plotter: :mod:`matplotlib` plotter to use for the plots;
617
+ if set, the plots are not displayed (the plotter is returned instead).
618
+ If ``None`` (default), plots are displayed using ``matplotlib.pyplot``.
619
+ """
620
+ if plotter is not None:
621
+ plt = plotter
622
+ else:
623
+ try:
624
+ import matplotlib.pyplot as plt
625
+ except ImportError:
626
+ warnings.warn('matplotlib not installed; cannot show_grid')
627
+ return
628
+ dim = self.dim
629
+ if axes is None:
630
+ axes = []
631
+ if dim == 1:
632
+ axes = [(0, None)]
633
+ for d in range(dim):
634
+ axes.append((d, (d + 1) % dim))
635
+ else:
636
+ if len(axes) <= 0:
637
+ return
638
+ for dx,dy in axes:
639
+ if dx is not None and (dx < 0 or dx >= dim):
640
+ raise ValueError('bad directions: %s' % str((dx, dy)))
641
+ if dy is not None and (dy < 0 or dy >= dim):
642
+ raise ValueError('bad directions: %s' % str((dx, dy)))
643
+ fig = plt.figure()
644
+ def plotdata(idx, grid=numpy.asarray(self.grid), ninc=numpy.asarray(self.ninc), axes=axes):
645
+ dx, dy = axes[idx[0]]
646
+ if dx is not None:
647
+ nskip = int(ninc[dx] // ngrid)
648
+ if nskip < 1:
649
+ nskip = 1
650
+ start = nskip // 2
651
+ xrange = [grid[dx, 0], grid[dx, ninc[dx]]]
652
+ xgrid = grid[dx, start::nskip]
653
+ xlabel = 'x[%d]' % dx
654
+ else:
655
+ xrange = [0., 1.]
656
+ xgrid = None
657
+ xlabel = ''
658
+ if dy is not None:
659
+ nskip = int(ninc[dy] // ngrid)
660
+ if nskip < 1:
661
+ nskip = 1
662
+ start = nskip // 2
663
+ yrange = [grid[dy, 0], grid[dy, ninc[dy]]]
664
+ ygrid = grid[dy, start::nskip]
665
+ ylabel = 'x[%d]' % dy
666
+ else:
667
+ yrange = [0., 1.]
668
+ ygrid = None
669
+ ylabel = ''
670
+ if shrink:
671
+ if xgrid is not None:
672
+ xrange = [min(xgrid), max(xgrid)]
673
+ if ygrid is not None:
674
+ yrange = [min(ygrid), max(ygrid)]
675
+ if None not in [dx, dy]:
676
+ fig_caption = 'axes %d, %d' % (dx, dy)
677
+ elif dx is None and dy is not None:
678
+ fig_caption = 'axis %d' % dy
679
+ elif dx is not None and dy is None:
680
+ fig_caption = 'axis %d' % dx
681
+ else:
682
+ return
683
+ fig.clear()
684
+ plt.title(
685
+ "%s (press 'n', 'p', 'q' or a digit)"
686
+ % fig_caption
687
+ )
688
+ plt.xlabel(xlabel)
689
+ plt.ylabel(ylabel)
690
+ if xgrid is not None:
691
+ for i in range(len(xgrid)):
692
+ plt.plot([xgrid[i], xgrid[i]], yrange, 'k-')
693
+ if ygrid is not None:
694
+ for i in range(len(ygrid)):
695
+ plt.plot(xrange, [ygrid[i], ygrid[i]], 'k-')
696
+ plt.xlim(*xrange)
697
+ plt.ylim(*yrange)
698
+
699
+ plt.draw()
700
+
701
+ idx = [0]
702
+ def onpress(event, idx=idx):
703
+ try: # digit?
704
+ idx[0] = int(event.key)
705
+ except ValueError:
706
+ if event.key == 'n':
707
+ idx[0] += 1
708
+ if idx[0] >= len(axes):
709
+ idx[0] = len(axes) - 1
710
+ elif event.key == 'p':
711
+ idx[0] -= 1
712
+ if idx[0] < 0:
713
+ idx[0] = 0
714
+ elif event.key == 'q':
715
+ plt.close()
716
+ return
717
+ else:
718
+ return
719
+ plotdata(idx)
720
+
721
+ fig.canvas.mpl_connect('key_press_event', onpress)
722
+ plotdata(idx)
723
+ if plotter is None:
724
+ plt.show()
725
+ else:
726
+ return plt
727
+
728
+ def adapt_to_samples(self, x, f, nitn=5, alpha=1.0, nproc=1):
729
+ r""" Adapt map to data ``{x, f(x)}``.
730
+
731
+ Replace grid with one that is optimized for integrating
732
+ function ``f(x)``. The new grid is found iteratively.
733
+
734
+ Args:
735
+ x (array): ``x[:, d]`` are the components of the sample points
736
+ in direction ``d=0,1...self.dim-1``.
737
+ f (callable or array): Function ``f(x)`` to be adapted to. If
738
+ ``f`` is an array, it is assumed to contain values ``f[i]``
739
+ corresponding to the function evaluated at points ``x[i]``.
740
+ nitn (int): Number of iterations to use in adaptation. Default
741
+ is ``nitn=5``.
742
+ alpha (float): Damping parameter for adaptation. Default
743
+ is ``alpha=1.0``. Smaller values slow the iterative
744
+ adaptation, to improve stability of convergence.
745
+ nproc (int or None): Number of processes/processors to use.
746
+ If ``nproc>1`` Python's :mod:`multiprocessing` module is
747
+ used to spread the calculation across multiple processors.
748
+ There is a significant overhead involved in using
749
+ multiple processors so this option is useful mainly
750
+ when very high dimensions or large numbers of samples
751
+ are involved. When using the :mod:`multiprocessing`
752
+ module in its default mode for MacOS and Windows,
753
+ it is important that the main module can be
754
+ safely imported (i.e., without launching new
755
+ processes). This can be accomplished with
756
+ some version of the ``if __name__ == '__main__':``
757
+ construct in the main module: e.g., ::
758
+
759
+ if __name__ == '__main__':
760
+ main()
761
+
762
+ This is not an issue on other Unix platforms.
763
+ See the :mod:`multiprocessing` documentation
764
+ for more information.
765
+ Set ``nproc=None`` to use all the processors
766
+ on the machine (equivalent to ``nproc=os.cpu_count()``).
767
+ Default value is ``nproc=1``. (Requires Python 3.3 or later.)
768
+ """
769
+ cdef Py_ssize_t i, tmp_ninc, old_ninc
770
+ x = numpy.ascontiguousarray(x)
771
+ if len(x.shape) != 2 or x.shape[1] != self.dim:
772
+ raise ValueError('incompatible shape of x: {}'.format(x.shape))
773
+ if nproc is None:
774
+ nproc = os.cpu_count()
775
+ if nproc is None:
776
+ raise ValueError("need to specify nproc (nproc=None does't work on this machine)")
777
+ nproc = int(nproc)
778
+ if callable(f):
779
+ fx = numpy.ascontiguousarray(f(x))
780
+ else:
781
+ fx = numpy.ascontiguousarray(f)
782
+ if fx.shape[0] != x.shape[0]:
783
+ raise ValueError('shape of x and f(x) mismatch: {} vs {}'.format(x.shape, fx.shape))
784
+ old_ninc = max(max(self.ninc), Integrator.defaults['maxinc_axis'])
785
+ tmp_ninc = type(old_ninc)(min(old_ninc, x.shape[0] / 10.))
786
+ if tmp_ninc < 2:
787
+ raise ValueError('not enough samples: {}'.format(x.shape[0]))
788
+ y = numpy.empty(x.shape, float)
789
+ jac = numpy.empty(x.shape[0], float)
790
+ self.adapt(ninc=tmp_ninc)
791
+ if nproc > 1:
792
+ pool = multiprocessing.Pool(processes=nproc)
793
+ for i in range(nitn):
794
+ self._add_training_data(x, f, fx, nproc, pool)
795
+ self.adapt(alpha=alpha, ninc=tmp_ninc)
796
+ pool.close()
797
+ pool.join()
798
+ else:
799
+ for i in range(nitn):
800
+ self.invmap(x, y, jac)
801
+ self.add_training_data(y, (jac * fx) ** 2)
802
+ self.adapt(alpha=alpha, ninc=tmp_ninc)
803
+ if numpy.any(tmp_ninc != old_ninc):
804
+ self.adapt(ninc=old_ninc)
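A usage sketch, with a hypothetical batch integrand ``g`` (decorated as described under :class:`Integrator` below): precondition the map on samples, then hand it to an integrator: ::

    import numpy
    import vegas

    @vegas.lbatchintegrand
    def g(x):   # hypothetical integrand, x[j, d]
        return numpy.exp(-50. * numpy.sum((x - 0.5) ** 2, axis=1))

    m = vegas.AdaptiveMap([(0, 1), (0, 1)])
    x = numpy.random.random((20000, 2))   # samples covering the region
    m.adapt_to_samples(x, g, nitn=5)      # train the grid on {x, g(x)}
    integ = vegas.Integrator(m)           # integrator inherits the grid
    result = integ(g, nitn=10, neval=2000)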
805
+
806
+ def _add_training_data(self, x, f, fx, nproc, pool):
807
+ " Used by self.adapt_to_samples in multiprocessing mode. "
808
+ nx = x.shape[0]
809
+ end = 0
810
+ args = []
811
+ for i in range(nproc):
812
+ nx = (x.shape[0] - end) // (nproc - i)
813
+ start = end
814
+ end = start + nx
815
+ args += [(
816
+ self,
817
+ x[start:end, :],
818
+ fx[start:end]
819
+ )]
820
+ res = pool.starmap(self._collect_training_data, args, 1)
821
+ self.sum_f = numpy.sum([resi[0] for resi in res], axis=0)
822
+ self.n_f = numpy.sum([resi[1] for resi in res], axis=0) + TINY
823
+
824
+ @staticmethod
825
+ def _collect_training_data(map, x, fx):
826
+ " Used by self.adapt_to_samples in multiprocessing mode. "
827
+ map.clear()
828
+ y = numpy.empty(x.shape, float)
829
+ jac = numpy.empty(x.shape[0], float)
830
+ map.invmap(x, y, jac)
831
+ map.add_training_data(y, (fx * jac)**2)
832
+ return (numpy.asarray(map.sum_f), numpy.asarray(map.n_f))
833
+
834
+ cdef class Integrator(object):
835
+ r""" Adaptive multidimensional Monte Carlo integration.
836
+
837
+ :class:`vegas.Integrator` objects make Monte Carlo
838
+ estimates of multidimensional functions ``f(x)``
839
+ where ``x[d]`` is a point in the integration volume::
840
+
841
+ integ = vegas.Integrator(integration_region)
842
+
843
+ result = integ(f, nitn=10, neval=10000)
844
+
845
+ The integrator makes ``nitn`` estimates of the integral, each
846
+ using at most ``neval`` samples of the integrand, as it adapts to
847
+ the specific features of the integrand. Successive estimates (iterations)
848
+ typically improve in accuracy until the integrator has fully
849
+ adapted. The integrator returns the weighted average of all
850
+ ``nitn`` estimates, together with an estimate of the statistical
851
+ (Monte Carlo) uncertainty in that estimate of the integral. The
852
+ result is an object of type :class:`RAvg` (which is derived
853
+ from :class:`gvar.GVar`).
854
+
855
+ Integrands ``f(x)`` return numbers, arrays of numbers (any shape), or
856
+ dictionaries whose values are numbers or arrays (any shape). Each number
857
+ returned by an integrand corresponds to a different integrand. When
858
+ arrays are returned, |vegas| adapts to the first number
859
+ in the flattened array. When dictionaries are returned,
860
+ |vegas| adapts to the first number in the value corresponding to
861
+ the first key.
862
+
863
+ |vegas| can generate integration points in batches for integrands
864
+ built from classes derived from :class:`vegas.LBatchIntegrand`, or
865
+ integrand functions decorated by :func:`vegas.lbatchintegrand`. Batch
866
+ integrands are typically much faster, especially if they are coded in
867
+ Cython or C/C++ or Fortran.
868
+
869
+ |Integrator|\s have a large number of parameters but the
870
+ only ones that most people will care about are: the
871
+ number ``nitn`` of iterations of the |vegas| algorithm;
872
+ the maximum number ``neval`` of integrand evaluations per
873
+ iteration; and the damping parameter ``alpha``, which is used
874
+ to slow down the adaptive algorithms when they would otherwise
875
+ be unstable (e.g., with very peaky integrands). Setting parameter
876
+ ``analyzer=vegas.reporter()`` is sometimes useful, as well,
877
+ since it causes |vegas| to print (on ``sys.stdout``)
878
+ intermediate results from each iteration, as they are
879
+ produced. This helps when each iteration takes a long time
880
+ to complete (e.g., longer than an hour) because it allows you to
881
+ monitor progress as it is being made (or not).
882
+
883
+ Args:
884
+ map (array, dictionary, :class:`vegas.AdaptiveMap` or :class:`vegas.Integrator`):
885
+ The integration region is specified by an array ``map[d, i]``
886
+ where ``d`` is the direction and ``i=0,1`` specify the lower
887
+ and upper limits of integration in direction ``d``. Integration
888
+ points ``x`` are packaged as arrays ``x[d]`` when
889
+ passed to the integrand ``f(x)``.
890
+
891
+ More generally, the integrator packages integration points in
892
+ multidimensional arrays ``x[d1, d2..dn]`` when the integration
893
+ limits are specified by ``map[d1, d2...dn, i]`` with ``i=0,1``.
894
+ These arrays can have any shape.
895
+
896
+ Alternatively, the integration region can be specified by a
897
+ dictionary whose values ``map[key]`` are either 2-tuples or
898
+ arrays of 2-tuples corresponding to the lower and upper
899
+ integration limits for the corresponding variables. Then
900
+ integration points ``xd`` are packaged as dictionaries
901
+ having the same structure as ``map`` but with the integration
902
+ limits replaced by the values of the variables:
903
+ for example, ::
904
+
905
+ map = dict(r=(0, 1), phi=[(0, np.pi), (0, 2 * np.pi)])
906
+
907
+ indicates a three-dimensional integral over variables ``r``
908
+ (from ``0`` to ``1``), ``phi[0]`` (from ``0`` to ``np.pi``),
909
+ and ``phi[1]`` (from ``0`` to ``2*np.pi``). In this case
910
+ integrands ``f(xd)`` have dictionary arguments ``xd`` where
911
+ ``xd['r']``, ``xd['phi'][0]``, and ``xd['phi'][1]``
912
+ correspond to the integration variables.
913
+
914
+ Finally ``map`` could be the integration map from
915
+ another |Integrator|, or that |Integrator|
916
+ itself. In this case the grid is copied from the
917
+ existing integrator.
918
+ uses_jac (bool): Setting ``uses_jac=True`` causes |vegas| to
919
+ call the integrand with two arguments: ``fcn(x, jac=jac)``.
920
+ The second argument is the Jacobian ``jac[d] = dx[d]/dy[d]``
921
+ of the |vegas| map. The integral over ``x[d]`` of ``1/jac[d]``
922
+ equals 1 (exactly). The default setting
923
+ is ``uses_jac=False``.
924
+ nitn (positive int): The maximum number of iterations used to
925
+ adapt to the integrand and estimate its value. The
926
+ default value is 10; typical values range from 10
927
+ to 20.
928
+ neval (positive int): Approximate number of integrand evaluations
929
+ in each iteration of the |vegas| algorithm. Increasing
930
+ ``neval`` increases the precision: statistical errors should
931
+ fall at least as fast as ``sqrt(1./neval)`` and often
932
+ fall much faster. The default value is 1000;
933
+ real problems often require 10--10,000 times more evaluations
934
+ than this.
935
+ nstrat (int array): ``nstrat[d]`` specifies the number of
936
+ strata to use in direction ``d``. By default this
937
+ parameter is set automatically, based on parameter ``neval``,
938
+ with ``nstrat[d]`` approximately the same for every ``d``.
939
+ Specifying ``nstrat`` explicitly makes it possible to
940
+ concentrate strata in directions where they are most
941
+ needed. If ``nstrat`` is set but ``neval`` is not,
942
+ ``neval`` is set equal to ``2*prod(nstrat)/(1-neval_frac)``.
943
+ alpha (float): Damping parameter controlling the remapping
944
+ of the integration variables as |vegas| adapts to the
945
+ integrand. Smaller values slow adaptation, which may be
946
+ desirable for difficult integrands. Small or zero ``alpha``\s
947
+ are also sometimes useful after the grid has adapted,
948
+ to minimize fluctuations away from the optimal grid.
949
+ The default value is 0.5.
950
+ beta (float): Damping parameter controlling the redistribution
951
+ of integrand evaluations across hypercubes in the
952
+ stratified sampling of the integral (over transformed
953
+ variables). Smaller values limit the amount of
954
+ redistribution. The theoretically optimal value is 1;
955
+ setting ``beta=0`` prevents any redistribution of
956
+ evaluations. The default value is 0.75.
957
+ neval_frac (float): Approximate fraction of function evaluations
958
+ used for adaptive stratified sampling. |vegas|
959
+ distributes ``(1-neval_frac)*neval`` integrand evaluations
960
+ uniformly over all hypercubes, with at least 2 evaluations
961
+ per hypercube. The remaining ``neval_frac*neval``
962
+ evaluations are concentrated in hypercubes where the errors
963
+ are largest. Increasing ``neval_frac`` makes more integrand
964
+ evaluations available for adaptive stratified
965
+ sampling, but reduces the number of hypercubes, which limits
966
+ the algorithm's ability to adapt. Ignored when ``beta=0``.
967
+ Default is ``neval_frac=0.75``.
968
+ adapt (bool): Setting ``adapt=False`` prevents further
969
+ adaptation by |vegas|. Typically this would be done
970
+ after training the |Integrator| on an integrand, in order
971
+ to stabilize further estimates of the integral. |vegas| uses
972
+ unweighted averages to combine results from different
973
+ iterations when ``adapt=False``. The default setting
974
+ is ``adapt=True``.
975
+ nproc (int or None): Number of processes/processors used
976
+ to evaluate the integrand. If ``nproc>1`` Python's
977
+ :mod:`multiprocessing` module is used to spread
978
+ integration points across multiple processors, thereby
979
+ potentially reducing the time required to evaluate the
980
+ integral. There is a significant overhead involved in using
981
+ multiple processors so this option is useful only when
982
+ the integrand is expensive to evaluate. When using the
983
+ :mod:`multiprocessing` module in its default mode for
984
+ MacOS and Windows, it is important that the main module
985
+ can be safely imported (i.e., without launching new
986
+ processes). This can be accomplished with
987
+ some version of the ``if __name__ == '__main__':``
988
+ construct in the main module: e.g., ::
989
+
990
+ if __name__ == '__main__':
991
+ main()
992
+
993
+ This is not an issue on other Unix platforms.
994
+ See the :mod:`multiprocessing` documentation
995
+ for more information. Note that setting ``nproc``
996
+ greater than 1 disables MPI support.
997
+ Set ``nproc=None`` to use all the processors
998
+ on the machine (equivalent to ``nproc=os.cpu_count()``).
999
+ Default value is ``nproc=1``. (Requires Python 3.3 or later.)
1000
+
1001
+ Note that ``nproc`` has nothing to do with MPI support.
1002
+ The number of MPI processors is specified outside Python
1003
+ (via, for example, ``mpirun -np 8 python script.py`` on
1004
+ the command line).
1005
+ correlate_integrals (bool): If ``True`` (default), |vegas| calculates
1006
+ correlations between the different integrals when doing multiple
1007
+ integrals simultaneously. If ``False``, |vegas| sets the correlations
1008
+ to zero.
1009
+ analyzer: An object with methods
1010
+
1011
+ ``analyzer.begin(itn, integrator)``
1012
+
1013
+ ``analyzer.end(itn_result, result)``
1014
+
1015
+ where: ``begin(itn, integrator)`` is called at the start
1016
+ of each |vegas| iteration with ``itn`` equal to the
1017
+ iteration number and ``integrator`` equal to the
1018
+ integrator itself; and ``end(itn_result, result)``
1019
+ is called at the end of each iteration with
1020
+ ``itn_result`` equal to the result for that
1021
+ iteration and ``result`` equal to the cumulative
1022
+ result of all iterations so far.
1023
+ Setting ``analyzer=vegas.reporter()``, for
1024
+ example, causes vegas to print out a running report
1025
+ of its results as they are produced. The default
1026
+ is ``analyzer=None``.
1027
+ min_neval_batch (positive int): The minimum number of integration
1028
+ points to be passed together to the integrand when using
1029
+ |vegas| in batch mode. The default value is 100,000. Larger
1030
+ values may lead to faster evaluations, but at the cost of
1031
+ more memory for internal work arrays. Batch sizes are all smaller
1032
+ than the lesser of ``min_neval_batch + max_neval_hcube`` and
1033
+ ``neval``. The last batch is usually smaller than this limit,
1034
+ as it is limited by ``neval``.
1035
+ max_neval_hcube (positive int): Maximum number of integrand
1036
+ evaluations per hypercube in the stratification. The default
1037
+ value is 50,000. Larger values might allow for more adaptation
1038
+ (when ``beta>0``), but also allow for more over-shoot when
1039
+ adapting to sharp peaks. Larger values also can result in
1040
+ large internal work arrays.
1041
+ gpu_pad (bool): If ``True``, |vegas| batches are padded so that
1042
+ they are all the same size. The extra integrand evaluations
1043
+ for integration points in the pad are discarded; increase
1044
+ ``min_neval_batch`` or reduce ``max_neval_hcube`` to
1045
+ decrease the number of evaluations that are discarded.
1046
+ Padding is usually minimal when ``min_neval_batch`` is
1047
+ equal to or larger than ``neval``. Padding can make
1048
+ GPU-based integrands work much faster, but it makes
1049
+ other types of integrand run more slowly.
1050
+ Default is ``False``.
1051
+ minimize_mem (bool): When ``True``, |vegas| minimizes
1052
+ internal workspace by moving some of its data to
1053
+ a disk file. This increases execution time (slightly)
1054
+ and results in temporary files, but might be desirable
1055
+ when the number of evaluations is very large (e.g.,
1056
+ ``neval=1e9``). ``minimize_mem=True``
1057
+ requires the ``h5py`` Python module.
1058
+ max_mem (positive float): Maximum number of floats allowed in
1059
+ internal work arrays (approx.). A ``MemoryError`` is
1060
+ raised if the work arrays are too large, in which case
1061
+ one might want to reduce ``min_neval_batch`` or
1062
+ ``max_neval_hcube``, or set ``minimize_mem=True``
1063
+ (or increase ``max_mem`` if there is enough RAM).
1064
+ Default value is 1e9.
1065
+ maxinc_axis (positive int): The maximum number of increments
1066
+ per axis allowed for the |x|-space grid. The default
1067
+ value is 1000; there is probably little need to use
1068
+ other values.
1069
+ rtol (float): Relative error in the integral estimate
1070
+ at which point the integrator can stop. The default
1071
+ value is 0.0 which turns off this stopping condition.
1072
+ This stopping condition can be quite unreliable
1073
+ in early iterations, before |vegas| has converged.
1074
+ Use with caution, if at all.
1075
+ atol (float): Absolute error in the integral estimate
1076
+ at which point the integrator can stop. The default
1077
+ value is 0.0 which turns off this stopping condition.
1078
+ This stopping condition can be quite unreliable
1079
+ in early iterations, before |vegas| has converged.
1080
+ Use with caution, if at all.
1081
+ ran_array_generator: Replacement function for the default
1082
+ random number generator. ``ran_array_generator(size)``
1083
+ should create random numbers uniformly distributed
1084
+ between 0 and 1 in an array whose dimensions are specified by the
1085
+ integer-valued tuple ``size``. Setting ``ran_array_generator``
1086
+ to ``None`` restores the default generator (from :mod:`gvar`).
1087
+ sync_ran (bool): If ``True`` (default), the *default* random
1088
+ number generator is synchronized across all processors when
1089
+ using MPI. If ``False``, |vegas| does no synchronization
1090
+ (but the random numbers should be synchronized some other
1091
+ way). Ignored if not using MPI.
1092
+ adapt_to_errors (bool):
1093
+ ``adapt_to_errors=False`` causes |vegas| to remap the
1094
+ integration variables to emphasize regions where ``|f(x)|``
1095
+ is largest. This is the default mode.
1096
+
1097
+ ``adapt_to_errors=True`` causes |vegas| to remap
1098
+ variables to emphasize regions where the Monte Carlo
1099
+ error is largest. This might be superior when
1100
+ the number of strata (``self.nstrat``)
1101
+ in the |y| grid is large (> 100). It is typically
1102
+ useful only in one or two dimensions.
1103
+ uniform_nstrat (bool): If ``True``, requires that the
1104
+ ``nstrat[d]`` be equal for all ``d``. If ``False`` (default),
1105
+ the algorithm maximizes the number of strata while
1106
+ requiring ``|nstrat[d1] - nstrat[d2]| <= 1``. This parameter
1107
+ is ignored if ``nstrat`` is specified explicitly.
1108
+ mpi (bool): Setting ``mpi=False`` (default) disables ``mpi`` support in
1109
+ ``vegas`` even if ``mpi`` is available; setting ``mpi=True``
1110
+ allows use of ``mpi`` provided module :mod:`mpi4py`
1111
+ is installed.
1112
+ """
1113
+
1114
+ # Settings accessible via the constructor and Integrator.set
1115
+ defaults = dict(
1116
+ map=None, # integration region, AdaptiveMap, or Integrator
1117
+ neval=1000, # number of evaluations per iteration
1118
+ maxinc_axis=1000, # number of adaptive-map increments per axis
1119
+ min_neval_batch=100000, # min. number of evaluations per batch
1120
+ max_neval_hcube=50000, # max number of evaluations per h-cube
1121
+ gpu_pad=False, # pad batches for use by GPUs
1122
+ neval_frac=0.75, # fraction of evaluations used for adaptive stratified sampling
1123
+ max_mem=1e9, # memory cutoff (# of floats)
1124
+ nitn=10, # number of iterations
1125
+ alpha=0.5, # damping parameter for importance sampling
1126
+ beta=0.75, # damping parameter for stratified sampling
1127
+ adapt=True, # flag to turn adaptation on or off
1128
+ correlate_integrals=True,# calculate correlations between different integrals for multi-dimensional integrands
1129
+ minimize_mem=False, # minimize work memory (when neval very large)?
1130
+ adapt_to_errors=False, # alternative approach to stratified sampling (low dim)?
1131
+ uniform_nstrat=False, # require same nstrat[d] for all directions d?
1132
+ rtol=0, # relative error tolerance
1133
+ atol=0, # absolute error tolerance
1134
+ analyzer=None, # analyzes results from each iteration
1135
+ ran_array_generator=None, # alternative random number generator
1136
+ sync_ran=True, # synchronize random generators across MPI processes?
1137
+ mpi=False, # allow MPI?
1138
+ uses_jac=False, # return Jacobian to integrand?
1139
+ nproc=1, # number of processors to use
1140
+ )
1141
+
1142
+ def __init__(Integrator self not None, map, **kargs):
1143
+ # N.B. All attributes initialized automatically by cython.
1144
+ # This is why self.set() works here.
1145
+ self.neval_hcube_range = None
1146
+ self.last_neval = 0
1147
+ self.pool = None
1148
+ self.sigf_h5 = None
1149
+ if isinstance(map, Integrator):
1150
+ self._set_map(map)
1151
+ args = {}
1152
+ for k in Integrator.defaults:
1153
+ if k != 'map':
1154
+ args[k] = getattr(map, k)
1155
+ # following not in Integrator.defaults
1156
+ self.sigf = numpy.array(map.sigf)
1157
+ self.sum_sigf = numpy.sum(self.sigf)
1158
+ self.nstrat = numpy.array(map.nstrat)
1159
+ if 'nstrat' not in kargs:
1160
+ kargs['nstrat'] = map.nstrat
1161
+ if 'neval' not in kargs:
1162
+ kargs['neval'] = map.neval
1163
+ else:
1164
+ self.sigf = numpy.array([], float) # reset sigf (dummy)
1165
+ self.sum_sigf = HUGE
1166
+ args = dict(Integrator.defaults)
1167
+ if 'map' in args:
1168
+ del args['map']
1169
+ self._set_map(map)
1170
+ self.nstrat = numpy.full(self.map.dim, 0, dtype=numpy.intp) # dummy (flags action in self.set())
1171
+ args.update(kargs)
1172
+ if 'nstrat' in kargs and 'neval' not in kargs and 'neval' in args:
1173
+ del args['neval']
1174
+ if 'neval' in kargs and 'nstrat' not in kargs and 'nstrat' in args:
1175
+ del args['nstrat']
1176
+ self.set(args)
1177
+
1178
+ def __del__(self):
1179
+ self._clear_sigf_h5()
1180
+ if self.pool is not None:
1181
+ self.pool.close()
1182
+ self.pool.join()
1183
+ self.pool = None
1184
+
1185
+ def _clear_sigf_h5(self):
1186
+ if self.sigf_h5 is not None:
1187
+ fname = self.sigf_h5.filename
1188
+ self.sigf_h5.close()
1189
+ os.unlink(fname)
1190
+ self.sigf_h5 = None
1191
+ self.sigf = numpy.array([], float) # reset sigf (dummy)
1192
+ self.sum_sigf = HUGE
1193
+
1194
+ def __reduce__(Integrator self not None):
1195
+ r""" Capture state for pickling. """
1196
+ odict = dict()
1197
+ for k in Integrator.defaults:
1198
+ if k in ['map']:
1199
+ continue
1200
+ odict[k] = getattr(self, k)
1201
+ odict['nstrat'] = numpy.asarray(self.nstrat)
1202
+ odict['sigf'] = numpy.asarray(self.sigf)
1203
+ return (Integrator, (self.map,), odict)
1204
+
1205
+ def __setstate__(Integrator self not None, odict):
1206
+ r""" Set state for unpickling. """
1207
+ self.set(odict)
1208
+
1209
+ def _set_map(self, map):
1210
+ r""" install new map, create xsample """
1211
+ if isinstance(map, AdaptiveMap):
1212
+ self.map = AdaptiveMap(map)
1213
+ self.xsample = numpy.empty(self.map.dim, dtype=float)
1214
+ for d in range(self.map.dim):
1215
+ self.xsample[d] = gvar.RNG.uniform(*self.map.region(d))
1216
+ elif isinstance(map, Integrator):
1217
+ self.map = AdaptiveMap(map.map)
1218
+ self.xsample = (
1219
+ gvar.BufferDict(map.xsample)
1220
+ if map.xsample.shape is None else
1221
+ numpy.array(map.xsample)
1222
+ )
1223
+ else:
1224
+ if hasattr(map, 'keys'):
1225
+ map = gvar.asbufferdict(map)
1226
+ self.xsample = gvar.BufferDict()
1227
+ limits = []
1228
+ for k in map:
1229
+ shape = map[k].shape[:-1]
1230
+ if shape == ():
1231
+ self.xsample[k] = gvar.RNG.uniform(*map[k])
1232
+ limits.append(map[k])
1233
+ else:
1234
+ self.xsample[k] = numpy.empty(shape, dtype=float)
1235
+ for idx in numpy.ndindex(shape):
1236
+ self.xsample[k][idx] = gvar.RNG.uniform(*map[k][idx])
1237
+ limits += numpy.array(map[k]).reshape(-1,2).tolist()
1238
+ self.map = AdaptiveMap(limits)
1239
+ else:
1240
+ # need to allow for possibility that map is a grid with differing numbers of
1241
+ # nodes in different directions; do this with dtype=object in the following
1242
+ map = numpy.array(map, dtype=object)
1243
+ if numpy.shape(map.flat[0]) == ():
1244
+ # homogeneous array
1245
+ self.xsample = numpy.empty(map.shape[:-1], dtype=float)
1246
+ grid = map.reshape(-1, 2)
1247
+ else:
1248
+ # heterogeneous array
1249
+ self.xsample = numpy.empty(map.shape, dtype=float)
1250
+ grid = map.reshape(-1)
1251
+ self.map = AdaptiveMap(grid)
1252
+ for i, idx in enumerate(numpy.ndindex(self.xsample.shape)):
1253
+ self.xsample[idx] = gvar.RNG.uniform(*self.map.region(i))
1254
+
1255
+
1256
+ def set(Integrator self not None, ka={}, **kargs):
1257
+ r""" Reset default parameters in integrator.
1258
+
1259
+ Usage is analogous to the constructor
1260
+ for |Integrator|: for example, ::
1261
+
1262
+ old_defaults = integ.set(neval=1e6, nitn=20)
1263
+
1264
+ resets the default values for ``neval`` and ``nitn``
1265
+ in |Integrator| ``integ``. A dictionary, here
1266
+ ``old_defaults``, is returned. It can be used
1267
+ to restore the old defaults using, for example::
1268
+
1269
+ integ.set(old_defaults)
1270
+ """
1271
+ # 1) reset parameters
1272
+ if kargs:
1273
+ kargs.update(ka)
1274
+ else:
1275
+ kargs = ka
1276
+ old_val = dict() # records anything that is changed
1277
+ nstrat = None
1278
+ for k in kargs:
1279
+ if k == 'map':
1280
+ old_val['map'] = self.map
1281
+ self._set_map(kargs['map'])
1282
+ elif k == 'nstrat':
1283
+ if kargs['nstrat'] is None:
1284
+ continue
1285
+ old_val['nstrat'] = self.nstrat
1286
+ nstrat = numpy.array(kargs['nstrat'], dtype=numpy.intp)
1287
+ elif k == 'sigf':
1288
+ old_val['sigf'] = self.sigf
1289
+ self.sigf = numpy.fabs(kargs['sigf'])
1290
+ self.sum_sigf = numpy.sum(self.sigf)
1291
+ elif k == 'nproc':
1292
+ old_val['nproc'] = self.nproc
1293
+ self.nproc = kargs['nproc'] if kargs['nproc'] is not None else os.cpu_count()
1294
+ if self.nproc is None:
1295
+ self.nproc = 1
1296
+ if self.nproc != old_val['nproc']:
1297
+ if self.pool is not None:
1298
+ self.pool.close()
1299
+ self.pool.join()
1300
+ if self.nproc != 1:
1301
+ try:
1302
+ self.pool = multiprocessing.Pool(processes=self.nproc)
1303
+ except:
1304
+ self.nproc = 1
1305
+ self.pool = None
1306
+ else:
1307
+ self.pool = None
1308
+ elif k in Integrator.defaults:
1309
+ # ignore entry if set to None (useful for debugging)
1310
+ # if kargs[k] is None:
1311
+ # continue
1312
+ old_val[k] = getattr(self, k)
1313
+ try:
1314
+ setattr(self, k, kargs[k])
1315
+ except:
1316
+ setattr(self, k, type(old_val[k])(kargs[k]))
1317
+ elif k not in ['nhcube_batch', 'max_nhcube']:
1318
+ # ignore legacy parameters, but raise error for others
1319
+ raise AttributeError('no parameter named "%s"' % str(k))
1320
+
1321
+ # 2) sanity checks
1322
+ if nstrat is not None:
1323
+ if len(nstrat) != self.map.dim:
1324
+ raise ValueError('nstrat[d] has wrong length: %d not %d' % (len(nstrat), self.map.dim))
1325
+ if numpy.any(nstrat < 1):
1326
+ raise ValueError('bad nstrat: ' + str(numpy.asarray(self.nstrat)))
1327
+ if self.neval_frac < 0 or self.neval_frac >= 1:
1328
+ raise ValueError('neval_frac = {} but require 0 <= neval_frac < 1'.format(self.neval_frac))
1329
+ if 'neval' in old_val and self.neval < 2:
1330
+ raise ValueError('neval>2 required, not ' + str(self.neval))
1331
+ neval_frac = 0 if (self.beta == 0 or self.adapt_to_errors) else self.neval_frac
1332
+
1333
+ self.dim = self.map.dim
1334
+
1335
+ # 3) determine # strata in each direction
1336
+ if nstrat is not None:
1337
+ # nstrat specified explicitly
1338
+ if len(nstrat) != self.dim or min(nstrat) < 1:
1339
+ raise ValueError('bad nstrat = %s' % str(numpy.asarray(nstrat)))
1340
+ nhcube = numpy.prod(nstrat)
1341
+ if 'neval' not in old_val:
1342
+ old_val['neval'] = self.neval
1343
+ self.neval = type(self.neval)(2. * nhcube / (1. - neval_frac))
1344
+ elif self.neval < 2. * nhcube / (1. - neval_frac):
1345
+ raise ValueError('neval too small: {} < {}'.format(self.neval, 2. * nhcube / (1. - neval_frac)))
1346
+ elif 'neval' in old_val or 'neval_frac' in old_val: ##### or 'max_nhcube' in old_val:
1347
+ # determine stratification from neval,neval_frac if either was specified
1348
+ ns = int(abs((1 - neval_frac) * self.neval / 2.) ** (1. / self.dim)) # strata / axis
1349
+ if ns < 1:
1350
+ ns = 1
1351
+ d = int(
1352
+ (numpy.log((1 - neval_frac) * self.neval / 2.) - self.dim * numpy.log(ns))
1353
+ / numpy.log(1 + 1. / ns)
1354
+ )
1355
+ if ((ns + 1)**d * ns**(self.dim-d)) > self.max_mem and not self.minimize_mem:
1356
+ raise MemoryError("work arrays larger than max_mem; set minimize_mem=True (and install h5py module) or increase max_mem")
1357
+ # ns = int(abs(self.max_nhcube) ** abs(1. / self.dim))
1358
+ # d = int(
1359
+ # (numpy.log(self.max_nhcube) - self.dim * numpy.log(ns))
1360
+ # / numpy.log(1 + 1. / ns)
1361
+ # )
1362
+ if self.uniform_nstrat:
1363
+ d = 0
1364
+ nstrat = numpy.empty(self.dim, numpy.intp)
1365
+ nstrat[:d] = ns + 1
1366
+ nstrat[d:] = ns
1367
+ else:
1368
+ # go with existing grid if none of nstrat, neval and neval_frac changed
1369
+ nstrat = self.nstrat
1370
+
1371
+ # 4) reconfigure vegas map, if necessary
1372
+ if self.adapt_to_errors:
1373
+ self.map.adapt(ninc=numpy.asarray(nstrat))
1374
+ else:
1375
+ ni = min(int(self.neval / 10.), self.maxinc_axis) # increments/axis
1376
+ ninc = numpy.empty(self.dim, numpy.intp)
1377
+ for d in range(self.dim):
1378
+ if ni >= nstrat[d]:
1379
+ ninc[d] = int(ni / nstrat[d]) * nstrat[d]
1380
+ elif nstrat[d] <= self.maxinc_axis:
1381
+ ninc[d] = nstrat[d]
1382
+ else:
1383
+ nstrat[d] = int(nstrat[d] / ni) * ni
1384
+ ninc[d] = ni
1385
+ if not numpy.all(numpy.equal(self.map.ninc, ninc)):
1386
+ self.map.adapt(ninc=ninc)
1387
+
1388
+ if not numpy.all(numpy.equal(self.nstrat, nstrat)):
1389
+ if 'sigf' not in old_val:
1390
+ # need to recalculate stratification distribution for beta>0
1391
+ # unless a new sigf was set
1392
+ old_val['sigf'] = self.sigf
1393
+ self.sigf = numpy.array([], float) # reset sigf (dummy)
1394
+ self.sum_sigf = HUGE
1395
+ self.nstrat = nstrat
1396
+
1397
+ # 5) set min_neval_hcube
1398
+ # chosen so that actual neval is close to but not larger than self.neval
1399
+ # (unless self.minimize_mem is True in which case it could be larger)
1400
+ self.nhcube = numpy.prod(self.nstrat, dtype=type(self.nhcube))
1401
+ avg_neval_hcube = int(self.neval / self.nhcube)
1402
+ if self.nhcube == 1:
1403
+ self.min_neval_hcube = int(self.neval)
1404
+ else:
1405
+ self.min_neval_hcube = int((1 - neval_frac) * self.neval / self.nhcube)
1406
+ if self.min_neval_hcube < 2:
1407
+ self.min_neval_hcube = 2
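+ # e.g. (illustrative numbers): neval=1000, neval_frac=0.2 and
+ # nhcube=100 give min_neval_hcube = int(0.8 * 1000 / 100) = 8
+ # guaranteed evaluations per h-cube, with the remaining 200
+ # evaluations distributed adaptively among the h-cubes.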
1408
+
1409
+ # 6) allocate work arrays -- these are stored in the
1410
+ # the Integrator so that the storage is held between
1411
+ # iterations, thereby minimizing the amount of allocating
1412
+ # that goes on
1413
+
1414
+ # neval_batch = self.nhcube_batch * avg_neval_hcube
1415
+ nsigf = self.nhcube
1416
+ if self.beta >= 0 and self.nhcube > 1 and not self.adapt_to_errors and len(self.sigf) != nsigf:
1417
+ # set up sigf
1418
+ self._clear_sigf_h5()
1419
+ if not self.minimize_mem:
1420
+ self.sigf = numpy.ones(nsigf, float)
1421
+ else:
1422
+ try:
1423
+ import h5py
1424
+ except ImportError:
1425
+ raise ValueError("Install the h5py Python module in order to use minimize_mem=True")
1426
+ self.sigf_h5 = h5py.File(tempfile.mkstemp(dir='.', prefix='vegastmp_')[1], 'a')
1427
+ self.sigf_h5.create_dataset('sigf', shape=(nsigf,), dtype=float, chunks=True, fillvalue=1.)
1428
+ self.sigf = self.sigf_h5['sigf']
1429
+ self.sum_sigf = nsigf
1430
+ self.neval_hcube = numpy.empty(self.min_neval_batch // 2 + 1, dtype=numpy.intp)
1431
+ self.neval_hcube[:] = avg_neval_hcube
1432
+ # allocate work space
1433
+ # self.y = numpy.empty((self.min_neval_batch, self.dim), float)
1434
+ # self.x = numpy.empty((self.min_neval_batch, self.dim), float)
1435
+ # self.jac = numpy.empty(self.min_neval_batch, float)
1436
+ # self.fdv2 = numpy.empty(self.min_neval_batch, float)
1437
+ workspace = self.min_neval_batch + self.max_neval_hcube
1438
+ if workspace > self.neval:
1439
+ workspace = self.neval + 1
1440
+ if (3*self.dim + 3) * workspace + (0 if self.minimize_mem else self.nhcube) > self.max_mem:
1441
+ raise MemoryError('work arrays larger than max_mem; reduce min_neval_batch or max_neval_hcube (or increase max_mem)')
1442
+ self.y = numpy.empty((workspace, self.dim), float)
1443
+ self.x = numpy.empty((workspace, self.dim), float)
1444
+ self.jac = numpy.empty(workspace, float)
1445
+ self.fdv2 = numpy.empty(workspace, float)
1446
+ return old_val
1447
+
1448
+ def settings(Integrator self not None, ngrid=0):
1449
+ r""" Assemble summary of integrator settings into string.
1450
+
1451
+ Args:
1452
+ ngrid (int): Number of grid nodes in each direction
1453
+ to include in summary.
1454
+ The default is 0.
1455
+ Returns:
1456
+ String containing the settings.
1457
+ """
1458
+ cdef Py_ssize_t d
1459
+ nhcube = numpy.prod(self.nstrat)
1460
+ neval = nhcube * self.min_neval_hcube if self.beta <= 0 else self.neval
1461
+ ans = "Integrator Settings:\n"
1462
+ if self.beta > 0 and not self.adapt_to_errors:
1463
+ ans = ans + (
1464
+ " %.6g (approx) integrand evaluations in each of %d iterations\n"
1465
+ % (self.neval, self.nitn)
1466
+ )
1467
+ else:
1468
+ ans = ans + (
1469
+ " %.6g integrand evaluations in each of %d iterations\n"
1470
+ % (neval, self.nitn)
1471
+ )
1472
+ ans = ans + (
1473
+ " number of: strata/axis = %s\n" %
1474
+ numpy.array2string(numpy.asarray(self.nstrat), max_line_width=80, prefix=29 * ' ')
1475
+ )
1476
+ ans = ans + (
1477
+ " increments/axis = %s\n" %
1478
+ numpy.array2string(numpy.asarray(self.map.ninc), max_line_width=80, prefix=33 * ' ')
1479
+ )
1480
+ ans = ans + (
1481
+ " h-cubes = %.6g processors = %d\n"
1482
+ % (nhcube, self.nproc)
1483
+ )
1484
+ max_neval_hcube = max(self.max_neval_hcube, self.min_neval_hcube)
1485
+ ans = ans + (
1486
+ " evaluations/batch >= %.2g\n"
1487
+ % (float(self.min_neval_batch),)
1488
+ )
1489
+ ans = ans + (
1490
+ " %d <= evaluations/h-cube <= %.2g\n"
1491
+ % (int(self.min_neval_hcube), float(max_neval_hcube))
1492
+ )
1493
+ ans = ans + (
1494
+ " minimize_mem = %s adapt_to_errors = %s adapt = %s\n"
1495
+ % (str(self.minimize_mem), str(self.adapt_to_errors), str(self.adapt))
1496
+ )
1497
+ ans = ans + (" accuracy: relative = %g absolute = %g\n" % (self.rtol, self.atol))
1498
+ if not self.adapt:
1499
+ ans = ans + (
1500
+ " damping: alpha = %g beta= %g\n\n"
1501
+ % (0., 0.)
1502
+ )
1503
+ elif self.adapt_to_errors:
1504
+ ans = ans + (
1505
+ " damping: alpha = %g beta= %g\n\n"
1506
+ % (self.alpha, 0.)
1507
+ )
1508
+ else:
1509
+ ans = ans + (
1510
+ " damping: alpha = %g beta= %g\n\n"
1511
+ % (self.alpha, self.beta)
1512
+ )
1513
+
1514
+ # add integration limits
1515
+ offset = 4 * ' '
1516
+ entries = []
1517
+ axis = 0
1518
+ # self.limits = self.limits.buf.reshape(-1,2)
1519
+ # format limits (5 digits)
1520
+ limits = list(self.map.region())
1521
+ for i in range(len(limits)):
1522
+ limits[i] = '({:.5}, {:.5})'.format(*limits[i])
1523
+ if self.xsample.shape is None:
1524
+ for k in self.xsample:
1525
+ if self.xsample[k].shape == ():
1526
+ entries.append((str(k), str(axis), str(limits[axis])))
1527
+ axis += 1
1528
+ else:
1529
+ prefix = str(k) + ' '
1530
+ for idx in numpy.ndindex(self.xsample[k].shape):
1531
+ str_idx = str(idx)[1:-1]
1532
+ str_idx = ''.join(str_idx.split(' '))
1533
+ if str_idx[-1] == ',':
1534
+ str_idx = str_idx[:-1]
1535
+ entries.append((prefix + str_idx, str(axis), str(limits[axis])))
1536
+ if prefix != '':
1537
+ prefix = '' # (len(str(k)) + 1) * ' '
1538
+ axis += 1
1539
+ linefmt = '{e0:>{w0}} {e1:>{w1}} {e2:>{w2}}'
1540
+ headers = ('key/index', 'axis', 'integration limits')
1541
+ w0 = max(len(ei[0]) for ei in entries)
1542
+ elif len(self.xsample.shape) > 1:
1543
+ for idx in numpy.ndindex(self.xsample.shape):
1544
+ str_idx = str(idx)[1:-1]
1545
+ str_idx = ''.join(str_idx.split(' '))
1546
+ if str_idx[-1] == ',':
1547
+ str_idx = str_idx[:-1]
1548
+ entries.append((str_idx, str(axis), str(limits[axis])))
1549
+ axis += 1
1550
+ linefmt = '{e0:>{w0}} {e1:>{w1}} {e2:>{w2}}'
1551
+ headers = ('key/index', 'axis', 'integration limits')
1552
+ w0 = max(len(ei[0]) for ei in entries)
1553
+ else:
1554
+ for axis,limits_axis in enumerate(limits):
1555
+ entries.append((None, str(axis), str(limits_axis)))
1556
+ linefmt = '{e1:>{w1}} {e2:>{w2}}'
1557
+ headers = (None, 'axis', 'integration limits')
1558
+ w0 = None
1559
+ w1 = max(len(ei[1]) for ei in entries)
1560
+ w2 = max(len(ei[2]) for ei in entries)
1561
+ ncol = 1 if self.map.dim <= 20 else 2
1562
+ table = ncol * [[]]
1563
+ nl = len(entries) // ncol
1564
+ if nl * ncol < len(entries):
1565
+ nl += 1
1566
+ ns = len(entries) - (ncol - 1) * nl
1567
+ ne = (ncol -1) * [nl] + [ns]
1568
+ iter_entries = iter(entries)
1569
+ for col in range(ncol):
1570
+ e0, e1, e2 = headers
1571
+ w0 = None if e0 is None else max(len(e0), w0)
1572
+ w1 = max(len(e1), w1)
1573
+ w2 = max(len(e2), w2)
1574
+ table[col] = [linefmt.format(e0=e0, w0=w0, e1=e1, w1=w1, e2=e2, w2=w2)]
1575
+ table[col].append(len(table[col][0]) * '-')
1576
+ for ii in range(ne[col]):
1577
+ e0, e1, e2 = next(iter_entries)
1578
+ table[col].append(linefmt.format(e0=e0, w0=w0, e1=e1, w1=w1, e2=e2, w2=w2))
1579
+ mtable = []
1580
+ ns += 2
1581
+ nl += 2
1582
+ for i in range(ns):
1583
+ mtable.append(' '.join([tabcol[i] for tabcol in table]))
1584
+ for i in range(ns, nl):
1585
+ mtable.append(' '.join([tabcol[i] for tabcol in table[:-1]]))
1586
+ ans += offset + ('\n' + offset).join(mtable) + '\n'
1587
+ # add grid data
1588
+ if ngrid > 0:
1589
+ ans += '\n' + self.map.settings(ngrid=ngrid)
1590
+ return ans
1591
+
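+ # Example (a sketch): print a settings summary that includes 5 grid
+ # nodes per axis for an existing integrator ``integ``:
+ #
+ # print(integ.settings(ngrid=5))
+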
1592
+ def _get_mpi_rank(self):
1593
+ try:
1594
+ import mpi4py.MPI
1595
+ return mpi4py.MPI.COMM_WORLD.Get_rank()
1596
+ except ImportError:
1597
+ return 0
1598
+
1599
+ mpi_rank = property(_get_mpi_rank, doc="MPI rank (>=0)")
1600
+
1601
+ def random_batch(
1602
+ Integrator self not None,
1603
+ bint yield_hcube=False,
1604
+ bint yield_y=False,
1605
+ # fcn = None,
1606
+ ):
1607
+ r""" Low-level batch iterator over integration points and weights.
1608
+
1609
+ This method creates an iterator that returns integration
1610
+ points from |vegas|, and their corresponding weights in an
1611
+ integral. The points are provided in arrays ``x[i, d]`` where
1612
+ ``i=0...`` labels the integration points in a batch
1613
+ and ``d=0...`` labels direction. The corresponding
1614
+ weights assigned by |vegas| to each point are provided
1615
+ in an array ``wgt[i]``.
1616
+
1617
+ Optionally the integrator will also return the indices of
1618
+ the hypercubes containing the integration points and/or the |y|-space
1619
+ coordinates of those points::
1620
+
1621
+ integ.random_batch() yields x, wgt
1622
+
1623
+ integ.random_batch(yield_hcube=True) yields x, wgt, hcube
1624
+
1625
+ integ.random_batch(yield_y=True) yields x, y, wgt
1626
+
1627
+ integ.random_batch(yield_hcube=True, yield_y=True) yields x, y, wgt, hcube
1628
+
1629
+ The number of integration points returned by the iterator
1630
+ corresponds to a single iteration. The number in a batch
1631
+ is controlled by the parameter ``min_neval_batch``.
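+
+ For example, an integral estimate can be assembled directly
+ from the iterator (a sketch, assuming a scalar integrand ``f(x)``
+ and an adapted integrator ``integ``)::
+
+ integral = 0.0
+ for x, wgt in integ.random_batch():
+ for i in range(x.shape[0]):
+ integral += wgt[i] * f(x[i])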
1632
+ """
1633
+ for t in self._random_batch(yield_hcube, yield_y):
1634
+ yield tuple(numpy.array(ti) for ti in t)
1635
+
1636
+ def _random_batch(
1637
+ Integrator self not None,
1638
+ bint yield_hcube=False,
1639
+ bint yield_y=False,
1640
+ # fcn = None,
1641
+ ):
1642
+ r""" Underlying implementation of generator :meth:`Integrator.random_batch`.
1643
+
1644
+ Only difference from ``random_batch()`` is that the values for
1645
+ ``x``, ``y``, etc. are returned here as memoryviews into internal buffers
1646
+ that are overwritten by subsequent iterations. ``random_batch()`` returns
1647
+ copies of the views that are not overwritten. ``_random_batch()`` is used
1648
+ internally to minimize memory use and memory churn.
1649
+ """
1650
+ cdef Py_ssize_t nhcube = numpy.prod(self.nstrat)
1651
+ cdef double dv_y = 1. / nhcube
1652
+ # cdef Py_ssize_t min_neval_batch #= min(self.min_neval_batch, nhcube)
1653
+ cdef Py_ssize_t neval_batch # self.neval_batch
1654
+ cdef Py_ssize_t hcube_base
1655
+ cdef Py_ssize_t i_start, ihcube, i, d, tmp_hcube, hcube
1656
+ cdef Py_ssize_t[::1] hcube_array
1657
+ cdef double neval_sigf = (
1658
+ self.neval_frac * self.neval / self.sum_sigf
1659
+ if self.beta > 0 and self.sum_sigf > 0 and not self.adapt_to_errors
1660
+ else 0.0 # use min_neval_hcube (should never happen)
1661
+ )
1662
+ cdef Py_ssize_t avg_neval_hcube = int(self.neval / self.nhcube)
1663
+ cdef Py_ssize_t min_neval_batch = self.min_neval_batch # min_neval_batch * avg_neval_hcube ####
1664
+ cdef Py_ssize_t max_nhcube_batch = min_neval_batch // 2 + 1 ####
1665
+ cdef Py_ssize_t[::1] neval_hcube = self.neval_hcube
1666
+ cdef Py_ssize_t[::1] y0 = numpy.empty(self.dim, numpy.intp)
1667
+ cdef Py_ssize_t max_neval_hcube = max(
1668
+ self.max_neval_hcube, self.min_neval_hcube
1669
+ )
1670
+ cdef double[::1] sigf
1671
+ cdef double[:, ::1] yran
1672
+ cdef double[:, ::1] y
1673
+ cdef double[:, ::1] x
1674
+ cdef double[::1] jac
1675
+ cdef bint adaptive_strat = (self.beta > 0 and nhcube > 1 and not self.adapt_to_errors)
1676
+ ran_array_generator = (
1677
+ gvar.RNG.random
1678
+ if self.ran_array_generator is None else
1679
+ self.ran_array_generator
1680
+ )
1681
+ self.last_neval = 0
1682
+ self.neval_hcube_range = numpy.zeros(2, numpy.intp) + self.min_neval_hcube
1683
+ if yield_hcube:
1684
+ hcube_array = numpy.empty(self.y.shape[0], numpy.intp)
1685
+ # if adaptive_strat and self.minimize_mem and not self.adapt:
1686
+ ##### believe this was the wrong idea; want to preserve adaptive stratification if it exists
1687
+ # # can't minimize_mem without also adapting, so force beta=0
1688
+ # neval_sigf = 0.0
1689
+ neval_batch = 0
1690
+ hcube_base = 0
1691
+ sigf = self.sigf[hcube_base:hcube_base + max_nhcube_batch]
1692
+ for hcube in range(nhcube):
1693
+ ihcube = hcube - hcube_base
1694
+ # determine number of evaluations for h-cube
1695
+ if adaptive_strat:
1696
+ neval_hcube[ihcube] = <int> (sigf[ihcube] * neval_sigf) + self.min_neval_hcube
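+ # (h-cubes with larger sigf, i.e. larger integrand fluctuations,
+ # receive proportionally more of the neval_frac * neval adaptive
+ # evaluations; every h-cube gets at least min_neval_hcube evaluations)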
1697
+ if neval_hcube[ihcube] > max_neval_hcube:
1698
+ neval_hcube[ihcube] = max_neval_hcube
1699
+ if neval_hcube[ihcube] < self.neval_hcube_range[0]:
1700
+ self.neval_hcube_range[0] = neval_hcube[ihcube]
1701
+ elif neval_hcube[ihcube] > self.neval_hcube_range[1]:
1702
+ self.neval_hcube_range[1] = neval_hcube[ihcube]
1703
+ neval_batch += neval_hcube[ihcube]
1704
+ else:
1705
+ neval_hcube[ihcube] = avg_neval_hcube
1706
+ neval_batch += avg_neval_hcube
1707
+
1708
+ if neval_batch < min_neval_batch and hcube < nhcube - 1:
1709
+ # don't have enough points yet
1710
+ continue
1711
+
1712
+ ############################## have enough points => build yields
1713
+ self.last_neval += neval_batch
1714
+ nhcube_batch = hcube - hcube_base + 1
1715
+ # if (3*self.dim + 3) * neval_batch * 2 > self.max_mem:
1716
+ # raise MemoryError('work arrays larger than max_mem; reduce min_neval_batch or max_neval_hcube (or increase max_mem)')
1717
+
1718
+ # 1) resize work arrays if needed (to double what is needed)
1719
+ if neval_batch > self.y.shape[0]:
1720
+ print("XXX - shouldn't get here ever")
1721
+ self.y = numpy.empty((2 * neval_batch, self.dim), float)
1722
+ self.x = numpy.empty((2 * neval_batch, self.dim), float)
1723
+ self.jac = numpy.empty(2 * neval_batch, float)
1724
+ self.fdv2 = numpy.empty(2 * neval_batch, float)
1725
+ y = self.y
1726
+ x = self.x
1727
+ jac = self.jac
1728
+ if yield_hcube and neval_batch > hcube_array.shape[0]:
1729
+ hcube_array = numpy.empty(2 * neval_batch, numpy.intp)
1730
+
1731
+ # 2) generate random points
1732
+ yran = ran_array_generator((neval_batch, self.dim))
1733
+ i_start = 0
1734
+ for ihcube in range(nhcube_batch):
1735
+ tmp_hcube = hcube_base + ihcube
1736
+ for d in range(self.dim):
1737
+ y0[d] = tmp_hcube % self.nstrat[d]
1738
+ tmp_hcube = (tmp_hcube - y0[d]) // self.nstrat[d]
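+ # (y0[d] are the mixed-radix digits of the h-cube index; each h-cube
+ # therefore occupies the y-space box
+ # [y0[d]/nstrat[d], (y0[d]+1)/nstrat[d]) along each axis)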
1739
+ for d in range(self.dim):
1740
+ for i in range(i_start, i_start + neval_hcube[ihcube]):
1741
+ y[i, d] = (y0[d] + yran[i, d]) / self.nstrat[d]
1742
+ i_start += neval_hcube[ihcube]
1743
+ self.map.map(y, x, jac, neval_batch)
1744
+
1745
+ # 3) compute weights and yield answers
1746
+ i_start = 0
1747
+ for ihcube in range(nhcube_batch):
1748
+ for i in range(i_start, i_start + neval_hcube[ihcube]):
1749
+ jac[i] *= dv_y / neval_hcube[ihcube]
1750
+ if yield_hcube:
1751
+ hcube_array[i] = hcube_base + ihcube
1752
+ i_start += neval_hcube[ihcube]
1753
+ answer = (x[:neval_batch, :],)
1754
+ if yield_y:
1755
+ answer += (y[:neval_batch, :],)
1756
+ answer += (jac[:neval_batch],)
1757
+ if yield_hcube:
1758
+ answer += (hcube_array[:neval_batch],)
1759
+ yield answer
1760
+
1761
+ # reset parameters for main loop
1762
+ if hcube < nhcube - 1:
1763
+ neval_batch = 0
1764
+ hcube_base = hcube + 1
1765
+ sigf = self.sigf[hcube_base:hcube_base + max_nhcube_batch]
1766
+
1767
+ # old name --- for legacy code
1768
+ random_vec = random_batch
1769
+
1770
+ def random(
1771
+ Integrator self not None, bint yield_hcube=False, bint yield_y=False
1772
+ ):
1773
+ r""" Low-level iterator over integration points and weights.
1774
+
1775
+ This method creates an iterator that returns integration
1776
+ points from |vegas|, and their corresponding weights in an
1777
+ integral. Each point ``x[d]`` is accompanied by the weight
1778
+ assigned to that point by |vegas| when estimating an integral.
1779
+ Optionally it will also return the index of the hypercube
1780
+ containing the integration point and/or the |y|-space
1781
+ coordinates::
1782
+
1783
+ integ.random() yields x, wgt
1784
+
1785
+ integ.random(yield_hcube=True) yields x, wgt, hcube
1786
+
1787
+ integ.random(yield_y=True) yields x, y, wgt
1788
+
1789
+ integ.random(yield_hcube=True, yield_y=True) yields x, y, wgt, hcube
1790
+
1791
+ The number of integration points returned by the iterator
1792
+ corresponds to a single iteration.
1793
+ """
1794
+ cdef double[:, ::1] x
1795
+ cdef double[::1] wgt
1796
+ cdef Py_ssize_t[::1] hcube
1797
+ cdef double[:, ::1] y
1798
+ cdef Py_ssize_t i
1799
+ if yield_hcube and yield_y:
1800
+ for x, y, wgt, hcube in self.random_batch(yield_hcube=True, yield_y=True):
1801
+ for i in range(x.shape[0]):
1802
+ yield (x[i], y[i], wgt[i], hcube[i])
1803
+ elif yield_y:
1804
+ for x, y, wgt in self.random_batch(yield_y=True):
1805
+ for i in range(x.shape[0]):
1806
+ yield (x[i], y[i], wgt[i])
1807
+ elif yield_hcube:
1808
+ for x, wgt, hcube in self.random_batch(yield_hcube=True):
1809
+ for i in range(x.shape[0]):
1810
+ yield (x[i], wgt[i], hcube[i])
1811
+ else:
1812
+ for x,wgt in self.random_batch():
1813
+ for i in range(x.shape[0]):
1814
+ yield (x[i], wgt[i])
1815
+
1816
+ def sample(self, nbatch=None, mode='rbatch'):
1817
+ r""" Generate random sample of integration weights and points.
1818
+
1819
+ Given a :class:`vegas.Integrator` called ``integ``, the code ::
1820
+
1821
+ wgt, x = integ.sample(mode='lbatch')
1822
+
1823
+ generates a random array of integration points ``x`` and the
1824
+ array of corresponding weights ``wgt`` such that ::
1825
+
1826
+ r = sum(wgt * f(x))
1827
+
1828
+ is an estimate of the integral of ``lbatch`` integrand ``f(x)``.
1829
+ Setting parameter ``mode='rbatch'`` formats ``x`` for use
1830
+ in ``rbatch`` integrands.
1831
+
1832
+ Parameter ``nbatch`` specifies the minimum number of integration
1833
+ points in the sample. The actual number is the smallest integer
1834
+ multiple of ``integ.last_neval`` that is equal to or larger than
1835
+ ``nbatch``.
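+
+ For example (a sketch, assuming an adapted integrator ``integ``
+ and an ``rbatch`` integrand ``f``)::
+
+ wgt, x = integ.sample(nbatch=100000, mode='rbatch')
+ estimate = (wgt * f(x)).sum()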
1836
+ """
1837
+ neval = self.last_neval if self.last_neval > 0 else self.neval
1838
+ nbatch = neval if nbatch is None else int(nbatch)
1839
+ nit = nbatch // neval
1840
+ if nit * neval < nbatch:
1841
+ nit += 1
1842
+ samples = []
1843
+ wgts = []
1844
+ for _ in range(nit):
1845
+ for x, w in self.random_batch():
1846
+ samples.append(numpy.array(x))
1847
+ wgts.append(numpy.array(w))
1848
+ samples = numpy.concatenate(samples, axis=0)
1849
+ wgts = numpy.concatenate(wgts) / nit
1850
+ # need to fix following to allow other formats for x
1851
+ if self.xsample.shape is None:
1852
+ if mode == 'rbatch':
1853
+ samples = gvar.BufferDict(self.xsample, rbatch_buf=samples.T)
1854
+ else:
1855
+ samples = gvar.BufferDict(self.xsample, lbatch_buf=samples)
1856
+ else:
1857
+ if self.xsample.shape != ():
1858
+ if mode == 'rbatch':
1859
+ samples = samples.T
1860
+ samples.shape = self.xsample.shape + (-1,)
1861
+ else:
1862
+ samples.shape = (-1,) + self.xsample.shape
1863
+ return wgts, samples
1864
+
1865
+
1866
+ @staticmethod
1867
+ def synchronize_random():
1868
+ try:
1869
+ import mpi4py.MPI
1870
+ except ImportError:
1871
+ return
1872
+ comm = mpi4py.MPI.COMM_WORLD
1873
+ rank = comm.Get_rank()
1874
+ mpi_nproc = comm.Get_size()
1875
+ if mpi_nproc > 1:
1876
+ # synchronize random numbers
1877
+ if rank == 0:
1878
+ seed = gvar.ranseed(size=10)
1879
+ # seed = tuple(
1880
+ # gvar.randint(1, min(2**30, sys.maxsize), size=5)
1881
+ # )
1882
+ else:
1883
+ seed = None
1884
+ seed = comm.bcast(seed, root=0)
1885
+ gvar.ranseed(seed)
1886
+
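+ # (called automatically near the start of Integrator.__call__ when
+ # sync_ran=True and MPI is in use, so that every rank generates the
+ # same random integration points)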
1887
+ def _make_std_integrand(self, fcn, xsample=None):
1888
+ r""" Convert integrand ``fcn`` into an lbatch integrand.
1889
+
1890
+ Returns an object ``vi`` of type :class:`VegasIntegrand`.
1891
+ This object converts an arbitrary integrand ``fcn`` (``lbatch``, ``rbatch``,
1892
+ and non-batch, with or without dictionaries for input or output)
1893
+ into a standard form: an lbatch integrand whose output is a
1894
+ 2-d lbatch array.
1895
+
1896
+ This is useful when building integrands that call other
1897
+ functions of the parameters. The latter are converted to
1898
+ lbatch integrands irrespective of what they were
1899
+ originally. This standardizes them, making it straightforward
1900
+ to build them into a new integrand.
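+
+ For example (a sketch)::
+
+ vi = integ._make_std_integrand(fcn)
+ fx = vi.eval(x, jac=None) # x[i, d] -> fx[i, c]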
1901
+ """
1902
+ if isinstance(fcn, VegasIntegrand):
1903
+ return fcn
1904
+ return VegasIntegrand(
1905
+ fcn=fcn,
1906
+ map=self.map,
1907
+ uses_jac=self.uses_jac,
1908
+ xsample=self.xsample if xsample is None else xsample,
1909
+ mpi=False if self.nproc > 1 else self.mpi
1910
+ )
1911
+
1912
+ def __call__(Integrator self not None, fcn, save=None, saveall=None, **kargs):
1913
+ r""" Integrate integrand ``fcn``.
1914
+
1915
+ A typical integrand has the form, for example::
1916
+
1917
+ def f(x):
1918
+ return x[0] ** 2 + x[1] ** 4
1919
+
1920
+ The argument ``x[d]`` is an integration point, where
1921
+ index ``d=0...`` represents direction within the
1922
+ integration volume.
1923
+
1924
+ Integrands can be array-valued, representing multiple
1925
+ integrands: e.g., ::
1926
+
1927
+ def f(x):
1928
+ return [x[0] ** 2, x[0] / x[1]]
1929
+
1930
+ The return arrays can have any shape. Dictionary-valued
1931
+ integrands are also supported: e.g., ::
1932
+
1933
+ def f(x):
1934
+ return dict(a=x[0] ** 2, b=[x[0] / x[1], x[1] / x[0]])
1935
+
1936
+ Integrand functions that return arrays or dictionaries
1937
+ are useful for multiple integrands that are closely related,
1938
+ and can lead to substantial reductions in the errors for
1939
+ ratios or differences of the results.
1940
+
1941
+ Integrands take dictionaries as arguments when
1942
+ :class:`Integrator` keyword ``map`` is
1943
+ set equal to a dictionary. For example, with ::
1944
+
1945
+ map = dict(r=(0,1), theta=(0, np.pi), phi=(0, 2*np.pi))
1946
+
1947
+ the volume of a unit sphere is obtained by integrating ::
1948
+
1949
+ def f(xd):
1950
+ r = xd['r']
1951
+ theta = xd['theta']
1952
+ return r ** 2 * np.sin(theta)
1953
+
1954
+ It is usually much faster to use |vegas| in batch
1955
+ mode, where integration points are presented to the
1956
+ integrand in batches. A simple batch integrand might
1957
+ be, for example::
1958
+
1959
+ @vegas.lbatchintegrand
1960
+ def f(x):
1961
+ return x[:, 0] ** 2 + x[:, 1] ** 4
1962
+
1963
+ where decorator ``@vegas.lbatchintegrand`` tells
1964
+ |vegas| that the integrand processes integration
1965
+ points in batches. The array ``x[i, d]``
1966
+ represents a collection of different integration
1967
+ points labeled by ``i=0...``. (The number of points is controlled
1968
+ by the |Integrator| parameter ``min_neval_batch``.)
1969
+
1970
+ Batch mode is particularly useful (and fast) when the integrand
1971
+ is coded in Cython. Then loops over the integration points
1972
+ can be coded explicitly, avoiding the need to use
1973
+ :mod:`numpy`'s whole-array operators if they are not
1974
+ well suited to the integrand.
1975
+
1976
+ The batch index is always first (leftmost) for lbatch
1977
+ integrands, as above. It is also possible to create batch
1978
+ integrands where the batch index is the last (rightmost)
1979
+ index: for example, ::
1980
+
1981
+ @vegas.rbatchintegrand
1982
+ def f(x):
1983
+ return x[0, :] ** 2 + x[1, :] ** 4
1984
+
1985
+ Batch integrands can also be constructed from classes
1986
+ derived from :class:`vegas.LBatchIntegrand` or
1987
+ :class:`vegas.RBatchIntegrand`.
1988
+
1989
+ Any |vegas| parameter can also be reset: e.g.,
1990
+ ``self(fcn, nitn=20, neval=1e6)``.
1991
+
1992
+ Args:
1993
+ fcn (callable): Integrand function.
1994
+ save (str or file or None): Writes ``results`` into pickle file specified
1995
+ by ``save`` at the end of each iteration. For example, setting
1996
+ ``save='results.pkl'`` means that the results returned by the last
1997
+ vegas iteration can be reconstructed later using::
1998
+
1999
+ import pickle
2000
+ with open('results.pkl', 'rb') as ifile:
2001
+ results = pickle.load(ifile)
2002
+
2003
+ Ignored if ``save=None`` (default).
2004
+ saveall (str or file or None): Writes ``(results, integrator)`` into pickle
2005
+ file specified by ``saveall`` at the end of each iteration. For example,
2006
+ setting ``saveall='allresults.pkl'`` means that the results returned by
2007
+ the last vegas iteration, together with a clone of the (adapted) integrator,
2008
+ can be reconstructed later using::
2009
+
2010
+ import pickle
2011
+ with open('allresults.pkl', 'rb') as ifile:
2012
+ results, integrator = pickle.load(ifile)
2013
+
2014
+ Ignored if ``saveall=None`` (default).
2015
+
2016
+ Returns:
2017
+ Monte Carlo estimate of the integral of ``fcn(x)`` as
2018
+ an object of type :class:`vegas.RAvg`,
2019
+ :class:`vegas.RAvgArray`, or :class:`vegas.RAvgDict`.
2020
+ """
2021
+ cdef double[:, ::1] x
2022
+ # cdef double[:, ::1] jac
2023
+ cdef double[::1] wgt
2024
+ cdef Py_ssize_t[::1] hcube
2025
+
2026
+ cdef double[::1] sigf
2027
+ cdef double[:, ::1] y
2028
+ cdef double[::1] fdv2
2029
+ cdef double[:, ::1] fx
2030
+ cdef double[::1] dwf
2031
+ cdef double[::1] sum_wf
2032
+ cdef double[::1] sum_dwf
2033
+ cdef double[:, ::1] sum_dwf2
2034
+ cdef double[::1] mean # = numpy.empty(1, float)
2035
+ cdef double[:, ::1] var # = numpy.empty((1, 1), float)
2036
+ cdef Py_ssize_t itn, i, j, jtmp, s, t, neval, fcn_size, len_hcube
2037
+ cdef bint adaptive_strat
2038
+ cdef double sum_sigf, sigf2
2039
+ cdef bint firsteval = True
2040
+ cdef bint gpu_pad
2041
+ if kargs:
2042
+ self.set(kargs)
2043
+ gpu_pad = self.gpu_pad and (self.beta != 0) and self.adapt
2044
+ if self.nproc > 1:
2045
+ if self.nproc * self.min_neval_batch <= self.neval:
2046
+ old_defaults = self.set(mpi=False, min_neval_batch=self.nproc * self.min_neval_batch)
2047
+ else:
2048
+ old_defaults = self.set(mpi=False, min_neval_batch=self.neval // self.nproc)
2049
+ elif self.mpi:
2050
+ pass
2051
+
2052
+ adaptive_strat = (
2053
+ self.beta > 0 and self.nhcube > 1
2054
+ and self.adapt and not self.adapt_to_errors
2055
+ )
2056
+
2057
+ # synchronize random numbers across all processes (mpi)
2058
+ if self.sync_ran and self.mpi:
2059
+ self.synchronize_random()
2060
+
2061
+ # Put integrand into standard form
2062
+ fcn = self._make_std_integrand(fcn)
2063
+ # fcn = VegasIntegrand(
2064
+ # fcn, map=self.map, uses_jac=self.uses_jac, xsample=self.xsample,
2065
+ # mpi=False if self.nproc > 1 else self.mpi
2066
+ # )
2067
+ fcn_size = fcn.size
2068
+
2069
+ # allocate work arrays
2070
+ dwf = numpy.empty(fcn_size, float)
2071
+ sum_wf = numpy.empty(fcn_size, float)
2072
+ sum_dwf = numpy.empty(fcn_size, float)
2073
+ if self.correlate_integrals:
2074
+ sum_dwf2 = numpy.empty((fcn_size, fcn_size), float)
2075
+ else:
2076
+ sum_dwf2 = numpy.empty((fcn_size, 1), float)
2077
+ mean = numpy.zeros(fcn_size, float)
2078
+ if self.correlate_integrals:
2079
+ var = numpy.zeros((fcn_size, fcn_size), float)
2080
+ else:
2081
+ var = numpy.zeros((fcn_size,1), float)
2082
+ # mean[:] = 0.0
2083
+ # var[:, :] = 0.0
2084
+ result = VegasResult(fcn, weighted=self.adapt)
2085
+
2086
+ for itn in range(self.nitn):
2087
+ if self.analyzer is not None:
2088
+ self.analyzer.begin(itn, self)
2089
+
2090
+ # initialize arrays that accumulate results for a single iteration
2091
+ mean[:] = 0.0
2092
+ var[:, :] = 0.0
2093
+ sum_sigf = 0.0
2094
+
2095
+ # iterate batch-slices of integration points
2096
+ for x, y, wgt, hcube in self._random_batch(
2097
+ yield_hcube=True, yield_y=True, #fcn=fcn
2098
+ ):
2099
+ fdv2 = self.fdv2 # must be inside loop
2100
+ len_hcube = len(hcube)
2101
+
2102
+ # evaluate integrand at all points in x
2103
+ if gpu_pad:
2104
+ xa = numpy.asarray(self.x)
2105
+ xa[len(x):] = xa[len(x) - 1]
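+ # (pad the batch to the full work-array length by repeating the
+ # last point, so integrands running on GPUs always see fixed-size
+ # batches; the padded results are discarded below)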
2106
+ else:
2107
+ xa = numpy.asarray(x)
2108
+ if self.nproc > 1:
2109
+ nx = x.shape[0] // self.nproc + 1
2110
+ if self.uses_jac:
2111
+ jac1d = self.map.jac1d(y)
2112
+ results = self.pool.starmap(
2113
+ fcn.eval,
2114
+ [(xa[i*nx : (i+1)*nx], jac1d[i*nx : (i+1)*nx]) for i in range(self.nproc) if i*nx < xa.shape[0]],
2115
+ 1,
2116
+ )
2117
+ else:
2118
+ results = self.pool.starmap(
2119
+ fcn.eval,
2120
+ [(xa[i*nx : (i+1)*nx], None) for i in range(self.nproc) if i*nx < xa.shape[0]],
2121
+ 1,
2122
+ )
2123
+ fx = numpy.concatenate(results, axis=0, dtype=float)
2124
+ else:
2125
+ # fx = fcn.eval(x, jac=self.map.jac1d(y) if self.uses_jac else None)
2126
+ fx = numpy.asarray(
2127
+ fcn.eval(xa, jac=self.map.jac1d(y) if self.uses_jac else None),
2128
+ dtype=float
2129
+ )
2130
+ if gpu_pad:
2131
+ fx = fx[:len(x)]
2132
+ # sanity check
2133
+ if numpy.any(numpy.isnan(fx)):
2134
+ raise ValueError('integrand evaluates to nan')
2135
+
2136
+ # compute integral and variance for each h-cube
2137
+ # j is index of point within batch, i is hcube index
2138
+ j = 0
2139
+ sigf = self.sigf[hcube[0]:hcube[-1] + 1]
2140
+ for i in range(hcube[0], hcube[-1] + 1):
2141
+ # iterate over h-cubes
2142
+ sum_wf[:] = 0.0
2143
+ sum_dwf[:] = 0.0
2144
+ sum_dwf2[:, :] = 0.0
2145
+ neval = 0
2146
+ jtmp = j
2147
+ while jtmp < len_hcube and hcube[jtmp] == i:
2148
+ # iterate over points in hypercube for mean and neval
2149
+ for s in range(fcn_size):
2150
+ sum_wf[s] += wgt[jtmp] * fx[jtmp, s]
2151
+ jtmp += 1
2152
+ neval += 1
2153
+ while j < len_hcube and hcube[j] == i:
2154
+ # iterate over points in hypercube for variances
2155
+ for s in range(fcn_size):
2156
+ dwf[s] = wgt[j] * fx[j, s] - sum_wf[s] / neval
2157
+ if abs(dwf[s]) < EPSILON * abs(sum_wf[s] / neval):
2158
+ dwf[s] = EPSILON * abs(sum_wf[s] / neval)
2159
+ if self.correlate_integrals:
2160
+ sum_dwf2[s, s] += dwf[s] ** 2
2161
+ else:
2162
+ sum_dwf2[s, 0] += dwf[s] ** 2
2163
+ dwf[s] = 0. # kills off-diagonal covariances
2164
+ else:
2165
+ if self.correlate_integrals:
2166
+ sum_dwf2[s, s] += dwf[s] ** 2
2167
+ else:
2168
+ sum_dwf2[s, 0] += dwf[s] ** 2
2169
+ sum_dwf[s] += dwf[s] # doesn't contribute if round-off
2170
+ if self.correlate_integrals:
2171
+ for t in range(s):
2172
+ sum_dwf2[s, t] += dwf[s] * dwf[t]
2173
+ fdv2[j] = (wgt[j] * fx[j, 0] * neval) ** 2
2174
+ j += 1
2175
+ for s in range(fcn_size):
2176
+ # include Neely corrections (makes very little difference)
2177
+ mean[s] += sum_wf[s] + sum_dwf[s]
2178
+ if self.correlate_integrals:
2179
+ for t in range(s + 1):
2180
+ var[s, t] += (neval * sum_dwf2[s, t] - sum_dwf[s] * sum_dwf[t]) / (neval - 1.)
2181
+ else:
2182
+ var[s, 0] += (neval * sum_dwf2[s, 0] - sum_dwf[s] * sum_dwf[s]) / (neval - 1.)
2183
+ sigf2 = numpy.fabs((neval * sum_dwf2[0, 0] - sum_dwf[0] * sum_dwf[0]) / (neval - 1.))
2184
+ if adaptive_strat:
2185
+ sigf[i - hcube[0]] = sigf2 ** (self.beta / 2.)
2186
+ sum_sigf += sigf[i - hcube[0]]
2187
+ if self.adapt_to_errors and self.adapt:
2188
+ # replace fdv2 with variance
2189
+ # only one piece of data (from current hcube)
2190
+ fdv2[j - 1] = sigf2
2191
+ self.map.add_training_data(
2192
+ y[j - 1:, :], fdv2[j - 1:], 1
2193
+ )
2194
+ if self.minimize_mem:
2195
+ self.sigf[hcube[0]:hcube[-1] + 1] = sigf[:]
2196
+ if (not self.adapt_to_errors) and self.adapt and self.alpha > 0:
2197
+ self.map.add_training_data(y, fdv2, y.shape[0])
2198
+
2199
+ if self.correlate_integrals:
2200
+ for s in range(var.shape[0]):
2201
+ for t in range(s):
2202
+ var[t, s] = var[s, t]
2203
+ # accumulate result from this iteration
2204
+ result.update(mean, var, self.last_neval)
2205
+ else:
2206
+ # accumulate result from this iteration
2207
+ result.update(mean, var[:, 0], self.last_neval)
2208
+
2209
+ if self.beta > 0 and not self.adapt_to_errors and self.adapt:
2210
+ if sum_sigf > 0:
2211
+ self.sum_sigf = sum_sigf
2212
+ else:
2213
+ # integrand appears to be a constant => even distribution of points
2214
+ self.sigf[:] = 1.
2215
+ self.sum_sigf = len(self.sigf)
2216
+ if self.alpha > 0 and self.adapt:
2217
+ self.map.adapt(alpha=self.alpha)
2218
+ if self.analyzer is not None:
2219
+ result.update_analyzer(self.analyzer)
2220
+
2221
+ if save is not None:
2222
+ result.save(save)
2223
+ if saveall is not None:
2224
+ result.saveall(self, saveall)
2225
+
2226
+ if result.converged(self.rtol, self.atol):
2227
+ break
2228
+ if self.nproc > 1:
2229
+ self.set(old_defaults)
2230
+ return result.result
2231
+
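+ # Example usage (a sketch, assuming a two-dimensional integrator):
+ #
+ # import vegas
+ # integ = vegas.Integrator([[0, 1], [0, 1]])
+ # result = integ(lambda x: x[0] ** 2 + x[1] ** 4, nitn=10, neval=1000)
+ # print(result.summary())
+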
2232
+ class reporter:
2233
+ r""" Analyzer class that prints out a report, iteration
2234
+ by iteration, on how vegas is doing. Parameter ngrid
2235
+ specifies how many x[i]'s to print out from the maps
2236
+ for each axis.
2237
+
2238
+ Args:
2239
+ ngrid (int): Number of grid nodes printed out for
2240
+ each direction. Default is 0.
2241
+ """
2242
+ def __init__(self, ngrid=0):
2243
+ self.ngrid = ngrid
2244
+ self.clock = time.perf_counter if hasattr(time, 'perf_counter') else time.time
2245
+ # self.clock = time.time
2246
+
2247
+ def begin(self, itn, integrator):
2248
+ self.integrator = integrator
2249
+ self.itn = itn
2250
+ self.t0 = self.clock()
2251
+ if itn==0:
2252
+ print(integrator.settings())
2253
+ sys.stdout.flush()
2254
+
2255
+ def end(self, itn_ans, ans):
2256
+ print(" itn %2d: %s\n all itn's: %s"%(self.itn+1, itn_ans, ans))
2257
+ print(
2258
+ ' neval = %s neval/h-cube = %s\n chi2/dof = %.2f Q = %.2f time = %.2f'
2259
+ % (
2260
+ format(self.integrator.last_neval, '.6g'),
2261
+ tuple(self.integrator.neval_hcube_range),
2262
+ ans.chi2 / ans.dof if ans.dof > 0 else 0,
2263
+ ans.Q if ans.dof > 0 else 1.,
2264
+ self.clock() - self.t0
2265
+ )
2266
+ )
2267
+ print(self.integrator.map.settings(ngrid=self.ngrid))
2268
+ print('')
2269
+ sys.stdout.flush()
2270
+
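+ # Example (a sketch): attach the reporter as the integrator's
+ # analyzer so a report is printed after every iteration:
+ #
+ # result = integ(f, analyzer=vegas.reporter(ngrid=5))
+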
2271
+ # Objects for accumulating the results from multiple iterations of vegas.
2272
+ # Results can be scalars (RAvg), arrays (RAvgArray), or dictionaries (RAvgDict).
2273
+ # Each stores results from each iteration, as well as a weighted (running)
2274
+ # average of the results of all iterations (unless parameter weighted=False,
2275
+ # in which case the average is unweighted).
2276
+ class RAvg(gvar.GVar):
2277
+ r""" Running average of scalar-valued Monte Carlo estimates.
2278
+
2279
+ This class accumulates independent Monte Carlo
2280
+ estimates (e.g., of an integral) and combines
2281
+ them into a single average. It
2282
+ is derived from :class:`gvar.GVar` (from
2283
+ the :mod:`gvar` module if it is present) and
2284
+ represents a Gaussian random variable.
2285
+
2286
+ Different estimates are weighted by their
2287
+ inverse variances if parameter ``weight=True``;
2288
+ otherwise straight, unweighted averages are used.
2289
+ """
2290
+ def __init__(self, weighted=True, itn_results=None, sum_neval=0, _rescale=True):
2291
+ # rescale not used here
2292
+ self.rescale = None
2293
+ if weighted:
2294
+ self._wlist = []
2295
+ self.weighted = True
2296
+ else:
2297
+ self._msum = 0.
2298
+ self._varsum = 0.
2299
+ self._n = 0
2300
+ self.weighted = False
2301
+ self._mlist = []
2302
+ self.itn_results = []
2303
+ if itn_results is None:
2304
+ super(RAvg, self).__init__(
2305
+ *gvar.gvar(0., 0.).internaldata,
2306
+ )
2307
+ else:
2308
+ if isinstance(itn_results, bytes):
2309
+ itn_results = gvar.loads(itn_results)
2310
+ for r in itn_results:
2311
+ self.add(r)
2312
+ self.sum_neval = sum_neval
2313
+
2314
+ def extend(self, ravg):
2315
+ r""" Merge results from :class:`RAvg` object ``ravg`` after results currently in ``self``. """
2316
+ for r in ravg.itn_results:
2317
+ self.add(r)
2318
+ self.sum_neval += ravg.sum_neval
2319
+
2320
+ def __reduce_ex__(self, protocol):
2321
+ return (
2322
+ RAvg,
2323
+ (self.weighted, gvar.dumps(self.itn_results, protocol=protocol), self.sum_neval)
2324
+ )
2325
+
2326
+ def _remove_gvars(self, gvlist):
2327
+ tmp = RAvg(
2328
+ weighted=self.weighted,
2329
+ itn_results=self.itn_results,
2330
+ sum_neval=self.sum_neval,
2331
+ )
2332
+ tmp.itn_results = gvar.remove_gvars(tmp.itn_results, gvlist)
2333
+ tgvar = gvar.gvar_factory() # small cov matrix
2334
+ super(RAvg, tmp).__init__(*tgvar(0,0).internaldata)
2335
+ return tmp
2336
+
2337
+ def _distribute_gvars(self, gvlist):
2338
+ return RAvg(
2339
+ weighted=self.weighted,
2340
+ itn_results = gvar.distribute_gvars(self.itn_results, gvlist),
2341
+ sum_neval=self.sum_neval,
2342
+ )
2343
+
2344
+ def _chi2(self):
2345
+ if len(self.itn_results) <= 1:
2346
+ return 0.0
2347
+ if self.weighted:
2348
+ wavg = self.mean
2349
+ ans = 0.0
2350
+ for m, w in zip(self._mlist, self._wlist):
2351
+ ans += (wavg - m) ** 2 * w
2352
+ return ans
2353
+ else:
2354
+ wavg = self.mean
2355
+ ans = numpy.sum([(m - wavg) ** 2 for m in self._mlist]) / (self._varsum / self._n)
2356
+ return ans
2357
+ chi2 = property(_chi2, None, None, "*chi**2* of weighted average.")
2358
+
2359
+ def _dof(self):
2360
+ return len(self.itn_results) - 1
2361
+ dof = property(
2362
+ _dof,
2363
+ None,
2364
+ None,
2365
+ "Number of degrees of freedom in weighted average."
2366
+ )
2367
+
2368
+ def _nitn(self):
2369
+ return len(self.itn_results)
2370
+ nitn = property(_nitn, None, None, "Number of iterations.")
2371
+
2372
+ def _Q(self):
2373
+ return (
2374
+ gvar.gammaQ(self.dof / 2., self.chi2 / 2.)
2375
+ if self.dof > 0 and self.chi2 >= 0
2376
+ else float('nan')
2377
+ )
2378
+ Q = property(
2379
+ _Q,
2380
+ None,
2381
+ None,
2382
+ "*Q* or *p-value* of weighted average's *chi**2*.",
2383
+ )
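+ # (gvar.gammaQ(a, x) is the normalized incomplete gamma function, so
+ # Q is the probability that the observed chi**2 would be exceeded by
+ # chance, assuming Gaussian fluctuations)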
2384
+
2385
+ def _avg_neval(self):
2386
+ return self.sum_neval / self.nitn if self.nitn > 0 else 0
2387
+ avg_neval = property(_avg_neval, None, None, "Average number of integrand evaluations per iteration.")
2388
+
2389
+ def converged(self, rtol, atol):
2390
+ return self.sdev < atol + rtol * abs(self.mean)
2391
+
2392
+ def add(self, g):
2393
+ r""" Add estimate ``g`` to the running average. """
2394
+ self.itn_results.append(g)
2395
+ if isinstance(g, gvar.GVarRef):
2396
+ return
2397
+ self._mlist.append(g.mean)
2398
+ if self.weighted:
2399
+ self._wlist.append(1 / (g.var if g.var > TINY else TINY))
2400
+ var = 1. / numpy.sum(self._wlist)
2401
+ sdev = numpy.sqrt(var)
2402
+ mean = numpy.sum([w * m for w, m in zip(self._wlist, self._mlist)]) * var
2403
+ super(RAvg, self).__init__(*gvar.gvar(mean, sdev).internaldata)
2404
+ else:
2405
+ self._msum += g.mean
2406
+ self._varsum += g.var #if g.var > TINY else TINY
2407
+ self._n += 1
2408
+ mean = self._msum / self._n
2409
+ var = self._varsum / self._n ** 2
2410
+ super(RAvg, self).__init__(*gvar.gvar(mean, numpy.sqrt(var)).internaldata)
2411
+
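+ # The weighted branch above is the standard inverse-variance average;
+ # in plain numpy it amounts to (an illustrative sketch, with
+ # ``estimates`` a list of gvar.GVars):
+ #
+ # w = 1 / numpy.array([g.var for g in estimates])
+ # m = numpy.array([g.mean for g in estimates])
+ # mean = numpy.sum(w * m) / numpy.sum(w)
+ # sdev = numpy.sqrt(1 / numpy.sum(w))
+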
2412
+ def summary(self, extended=False, weighted=None):
2413
+ r""" Assemble summary of results, iteration-by-iteration, into a string.
2414
+
2415
+ Args:
2416
+ weighted (bool): Display weighted averages of results from different
2417
+ iterations if ``True``; otherwise show unweighted averages.
2418
+ Default behavior is determined by |vegas|.
2419
+ """
2420
+ if weighted is None:
2421
+ weighted = self.weighted
2422
+ acc = RAvg(weighted=weighted)
2423
+ linedata = []
2424
+ for i, res in enumerate(self.itn_results):
2425
+ acc.add(res)
2426
+ if i > 0:
2427
+ chi2_dof = acc.chi2 / acc.dof
2428
+ Q = acc.Q
2429
+ else:
2430
+ chi2_dof = 0.0
2431
+ Q = 1.0
2432
+ itn = '%3d' % (i + 1)
2433
+ integral = '%-15s' % res
2434
+ wgtavg = '%-15s' % acc
2435
+ chi2dof = '%8.2f' % chi2_dof
2436
+ Q = '%8.2f' % Q
2437
+ linedata.append((itn, integral, wgtavg, chi2dof, Q))
2438
+ nchar = 5 * [0]
2439
+ for data in linedata:
2440
+ for i, d in enumerate(data):
2441
+ if len(d) > nchar[i]:
2442
+ nchar[i] = len(d)
2443
+ fmt = '%%%ds %%-%ds %%-%ds %%%ds %%%ds\n' % tuple(nchar)
2444
+ if weighted:
2445
+ ans = fmt % ('itn', 'integral', 'wgt average', 'chi2/dof', 'Q')
2446
+ else:
2447
+ ans = fmt % ('itn', 'integral', 'average', 'chi2/dof', 'Q')
2448
+ ans += len(ans[:-1]) * '-' + '\n'
2449
+ for data in linedata:
2450
+ ans += fmt % data
2451
+ return ans
2452
+
2453
+ class RAvgDict(gvar.BufferDict):
2454
+ r""" Running average of dictionary-valued Monte Carlo estimates.
2455
+
2456
+ This class accumulates independent dictionaries of Monte Carlo
2457
+ estimates (e.g., of an integral) and combines
2458
+ them into a dictionary of averages. It
2459
+ is derived from :class:`gvar.BufferDict`. The dictionary
2460
+ values are :class:`gvar.GVar`\s or arrays of :class:`gvar.GVar`\s.
2461
+
2462
+ Different estimates are weighted by their
2463
+ inverse covariance matrices if parameter ``weight=True``;
2464
+ otherwise straight, unweighted averages are used.
2465
+ """
2466
+ def __init__(self, dictionary=None, weighted=True, itn_results=None, sum_neval=0, rescale=True):
2467
+ if isinstance(itn_results, bytes):
2468
+ itn_results = gvar.loads(itn_results)
2469
+ if dictionary is None and (itn_results is None or len(itn_results) < 1):
2470
+ raise ValueError('must specify dictionary or itn_results')
2471
+ super(RAvgDict, self).__init__(dictionary if dictionary is not None else itn_results[0])
2472
+ self.rarray = RAvgArray(shape=(self.size,), weighted=weighted, rescale=rescale)
2473
+ self.buf = numpy.asarray(self.rarray) # turns it into a normal ndarray
2474
+ self.itn_results = []
2475
+ self.weighted = weighted
2476
+ if itn_results is not None:
2477
+ for r in itn_results:
2478
+ self.add(r)
2479
+ self.sum_neval = sum_neval
2480
+
2481
+ def extend(self, ravg):
2482
+ r""" Merge results from :class:`RAvgDict` object ``ravg`` after results currently in ``self``. """
2483
+ for r in ravg.itn_results:
2484
+ self.add(r)
2485
+ self.sum_neval += ravg.sum_neval
2486
+
2487
+ def __reduce_ex__(self, protocol):
2488
+ return (
2489
+ RAvgDict,
2490
+ (None, self.weighted, gvar.dumps(self.itn_results, protocol=protocol), self.sum_neval, self.rescale),
2491
+ )
2492
+
2493
+ def _remove_gvars(self, gvlist):
2494
+ tmp = RAvgDict(
2495
+ weighted=self.weighted,
2496
+ itn_results=[gvar.BufferDict(x) for x in self.itn_results],
2497
+ sum_neval=self.sum_neval,
2498
+ rescale=self.rescale,
2499
+ )
2500
+ tmp.rarray = gvar.remove_gvars(tmp.rarray, gvlist)
2501
+ tmp._buf = gvar.remove_gvars(tmp.buf, gvlist)
2502
+ return tmp
2503
+
2504
+ def _distribute_gvars(self, gvlist):
2505
+ self.rarray = gvar.distribute_gvars(self.rarray, gvlist)
2506
+ self._buf = gvar.distribute_gvars(self.buf, gvlist)
2507
+ return self
2508
+
2509
+ def converged(self, rtol, atol):
2510
+ return numpy.all(
2511
+ gvar.sdev(self.buf) <
2512
+ atol + rtol * numpy.abs(gvar.mean(self.buf))
2513
+ )
2514
+
2515
+ def add(self, g):
2516
+ if isinstance(g, gvar.BufferDict):
2517
+ newg = gvar.BufferDict(g)
2518
+ else:
2519
+ newg = gvar.BufferDict()
2520
+ for k in self:
2521
+ try:
2522
+ newg[k] = g[k]
2523
+ except (KeyError, AttributeError):
2524
+ raise ValueError(
2525
+ "Dictionary g doesn't contain key " + str(k) + '.'
2526
+ )
2527
+ self.itn_results.append(newg)
2528
+ self.rarray.add(newg.buf)
2529
+
2530
+ def summary(self, extended=False, weighted=None, rescale=None):
2531
+ r""" Assemble summary of results, iteration-by-iteration, into a string.
2532
+
2533
+ Args:
2534
+ extended (bool): Include a table of final averages for every
2535
+ component of the integrand if ``True``. Default is ``False``.
2536
+ weighted (bool): Display weighted averages of results from different
2537
+ iterations if ``True``; otherwise show unweighted averages.
2538
+ Default behavior is determined by |vegas|.
2539
+ """
2540
+ if weighted is None:
2541
+ weighted = self.weighted
2542
+ if rescale is None:
2543
+ rescale = self.rarray.rescale
2544
+ ans = self.rarray.summary(weighted=weighted, extended=False, rescale=rescale)
2545
+ if extended and self.itn_results[0].size > 1:
2546
+ ans += '\n' + gvar.tabulate(self) + '\n'
2547
+ return ans
2548
+
2549
+ def _chi2(self):
2550
+ return self.rarray.chi2
2551
+ chi2 = property(_chi2, None, None, "*chi**2* of weighted average.")
2552
+
2553
+ def _dof(self):
2554
+ return self.rarray.dof
2555
+ dof = property(
2556
+ _dof, None, None,
2557
+ "Number of degrees of freedom in weighted average."
2558
+ )
2559
+
2560
+ def _nitn(self):
2561
+ return len(self.itn_results)
2562
+ nitn = property(_nitn, None, None, "Number of iterations.")
2563
+
2564
+ def _Q(self):
2565
+ return self.rarray.Q
2566
+ Q = property(
2567
+ _Q, None, None,
2568
+ "*Q* or *p-value* of weighted average's *chi**2*.",
2569
+ )
2570
+
2571
+ def _avg_neval(self):
2572
+ return self.sum_neval / self.nitn if self.nitn > 0 else 0
2573
+ avg_neval = property(_avg_neval, None, None, "Average number of integrand evaluations per iteration.")
2574
+
2575
+ def _get_rescale(self):
2576
+ return self.rarray.rescale
2577
+ rescale = property(_get_rescale, None, None, "Integrals divided by ``rescale`` before doing weighted averages.")
2578
+
2579
+ class RAvgArray(numpy.ndarray):
2580
+ r""" Running average of array-valued Monte Carlo estimates.
2581
+
2582
+ This class accumulates independent arrays of Monte Carlo
2583
+ estimates (e.g., of an integral) and combines
2584
+ them into an array of averages. It
2585
+ is derived from :class:`numpy.ndarray`. The array
2586
+ elements are :class:`gvar.GVar`\s (from the ``gvar`` module if
2587
+ present) and represent Gaussian random variables.
2588
+
2589
+ Different estimates are weighted by their
2590
+ inverse covariance matrices if parameter ``weight=True``;
2591
+ otherwise straight, unweighted averages are used.
2592
+ """
2593
+ def __new__(
2594
+ subtype, shape=None,
2595
+ dtype=object, buffer=None, offset=0, strides=None, order=None,
2596
+ weighted=True, itn_results=None, sum_neval=0, rescale=True
2597
+ ):
2598
+ if shape is None and (itn_results is None or len(itn_results) < 1):
2599
+ raise ValueError('must specify shape or itn_results')
2600
+ obj = numpy.ndarray.__new__(
2601
+ subtype, shape=shape if shape is not None else numpy.shape(itn_results[0]),
2602
+ dtype=object, buffer=buffer, offset=offset,
2603
+ strides=strides, order=order
2604
+ )
2605
+ if buffer is None:
2606
+ obj.flat = numpy.array(obj.size * [gvar.gvar(0,0)])
2607
+ obj.itn_results = []
2608
+ obj._mlist = []
2609
+ if rescale is False or rescale is None or not weighted:
2610
+ obj.rescale = None
2611
+ elif rescale is True:
2612
+ obj.rescale = True
2613
+ else:
2614
+ # flatten rescale
2615
+ if hasattr(rescale, 'keys'):
2616
+ obj.rescale = gvar.asbufferdict(rescale)
2617
+ else:
2618
+ obj.rescale = numpy.asarray(rescale)
2619
+ if weighted:
2620
+ obj.weighted = True
2621
+ obj._wlist = []
2622
+ else:
2623
+ obj._msum = 0.
2624
+ obj._covsum = 0.
2625
+ obj._n = 0
2626
+ obj.weighted = False
2627
+ obj.sum_neval = sum_neval
2628
+ return obj
2629
+
2630
+ def _remove_gvars(self, gvlist):
2631
+ tmp = RAvgArray(
2632
+ weighted=self.weighted,
2633
+ itn_results= [numpy.array(x) for x in self.itn_results],
2634
+ sum_neval=self.sum_neval,
2635
+ rescale=self.rescale
2636
+ )
2637
+ tmp.itn_results = gvar.remove_gvars(tmp.itn_results, gvlist)
2638
+ tmp.flat[:] = gvar.remove_gvars(numpy.array(tmp), gvlist)
2639
+ return tmp
2640
+
2641
+ def _distribute_gvars(self, gvlist):
2642
+ return RAvgArray(
2643
+ weighted=self.weighted,
2644
+ itn_results=gvar.distribute_gvars(self.itn_results, gvlist),
2645
+ sum_neval=self.sum_neval,
2646
+ rescale=self.rescale,
2647
+ )
2648
+
2649
+ def __reduce_ex__(self, protocol):
2650
+ save = numpy.array(self.flat[:])
2651
+ self.flat[:] = 0
2652
+ superpickled = super(RAvgArray, self).__reduce__()
2653
+ self.flat[:] = save
2654
+ state = superpickled[2] + (
2655
+ self.weighted, gvar.dumps(self.itn_results, protocol=protocol),
2656
+ (self.sum_neval, self.rescale),
2657
+ )
2658
+ return (superpickled[0], superpickled[1], state)
2659
+
2660
+ def __setstate__(self, state):
2661
+ super(RAvgArray, self).__setstate__(state[:-3])
2662
+ if isinstance(state[-1], tuple):
2663
+ self.sum_neval, self.rescale = state[-1]
2664
+ else:
2665
+ # included for compatibility with previous versions
2666
+ self.sum_neval = state[-1]
2667
+ self.rescale = True
2668
+ itn_results = gvar.loads(state[-2])
2669
+ self.weighted = state[-3]
2670
+ if self.weighted:
2671
+ self._wlist = []
2672
+ self._mlist = []
2673
+ else:
2674
+ self._msum = 0.
2675
+ self._covsum = 0.
2676
+ self._n = 0
2677
+ self.itn_results = []
2678
+ for r in itn_results:
2679
+ self.add(r)
2680
+
2681
+ def __array_finalize__(self, obj):
2682
+ if obj is None:
2683
+ return
2684
+ if obj.weighted:
2685
+ self.weighted = getattr(obj, 'weighted', True)
2686
+ self._wlist = getattr(obj, '_wlist', [])
2687
+ else:
2688
+ self._msum = getattr(obj, '_msum', 0.)
2689
+ self._covsum = getattr(obj, '_cov', 0.)
2690
+ self._n = getattr(obj, '_n', 0.)
2691
+ self.weighted = getattr(obj, 'weighted', False)
2692
+ self._mlist = getattr(obj, '_mlist', [])
2693
+ self.itn_results = getattr(obj, 'itn_results', [])
2694
+ self.sum_neval = getattr(obj, 'sum_neval', 0)
2695
+ self.rescale = getattr(obj, 'rescale', True)
2696
+
2697
+ def __init__(self, shape=None,
2698
+ dtype=object, buffer=None, offset=0, strides=None, order=None,
2699
+ weighted=True, itn_results=None, sum_neval=0, rescale=True):
2700
+ # needed because array_finalize can't handle self.add(r)
2701
+ self[:] *= 0
2702
+ if itn_results is not None:
2703
+ if isinstance(itn_results, bytes):
2704
+ itn_results = gvar.loads(itn_results)
2705
+ self.itn_results = []
2706
+ for r in itn_results:
2707
+ self.add(r)
2708
+
2709
+ def extend(self, ravg):
2710
+ r""" Merge results from :class:`RAvgArray` object ``ravg`` after results currently in ``self``. """
2711
+ for r in ravg.itn_results:
2712
+ self.add(r)
2713
+ self.sum_neval += ravg.sum_neval
2714
+
2715
+ def _w(self, matrix, rescale=False):
2716
+ " Decompose inverse matrix, with protection against singular matrices. "
2717
+ # extra factor of 1e4 is from trial and error with degenerate integrands (need extra buffer);
2718
+ # also negative svdcut and rescale=False are important for degenerate integrands
2719
+ # (alternative svdcut>0 and rescale=True introduces biases; also rescale=True not needed
2720
+ # since now have self.rescale)
2721
+ s = gvar.SVD(matrix, svdcut=-EPSILON * len(matrix) * 1e4, rescale=rescale)
2722
+ return s.decomp(-1)
2723
+
2724
+ def converged(self, rtol, atol):
2725
+ return numpy.all(
2726
+ gvar.sdev(self) < atol + rtol * numpy.abs(gvar.mean(self))
2727
+ )
2728
+
2729
+ def _chi2(self):
2730
+ if len(self.itn_results) <= 1:
2731
+ return 0.0
2732
+ if self.weighted:
2733
+ ans = 0.0
2734
+ wavg = gvar.mean(self).reshape((-1,))
2735
+ if self.rescale is not None:
2736
+ wavg /= self._rescale
2737
+ for ri, w, m in zip(self.itn_results, self._wlist, self._mlist):
2738
+ for wi in w:
2739
+ ans += wi.dot(m - wavg) ** 2
2740
+ return ans
2741
+ else:
2742
+ if self._invw is None:
2743
+ self._invw = self._w(self._covsum /self._n)
2744
+ wavg = gvar.mean(self).reshape((-1,))
2745
+ ans = 0.0
2746
+ for m in self._mlist:
2747
+ delta = wavg - m
2748
+ for invwi in self._invw:
2749
+ ans += invwi.dot(delta) ** 2
2750
+ return ans
2751
+ chi2 = property(_chi2, None, None, "*chi**2* of weighted average.")
2752
+
2753
+ def _dof(self):
2754
+ if len(self.itn_results) <= 1:
2755
+ return 0
2756
+ if not self.weighted:
2757
+ if self._invw is None:
2758
+ self._invw = self._w(self._covsum /self._n)
2759
+ return (len(self.itn_results) - 1) * len(self._invw)
2760
+ else:
2761
+ return numpy.sum([len(w) for w in self._wlist]) - self.size
2762
+ dof = property(
2763
+ _dof, None, None,
2764
+ "Number of degrees of freedom in weighted average."
2765
+ )
2766
+
2767
+ def _nitn(self):
2768
+ return len(self.itn_results)
2769
+ nitn = property(_nitn, None, None, "Number of iterations.")
2770
+
2771
+ def _Q(self):
2772
+ if self.dof <= 0 or self.chi2 < 0:
2773
+ return float('nan')
2774
+ return gvar.gammaQ(self.dof / 2., self.chi2 / 2.)
2775
+ Q = property(
2776
+ _Q, None, None,
2777
+ "*Q* or *p-value* of weighted average's *chi**2*.",
2778
+ )
2779
+
2780
+ def _avg_neval(self):
2781
+ return self.sum_neval / self.nitn if self.nitn > 0 else 0
2782
+ avg_neval = property(_avg_neval, None, None, "Average number of integrand evaluations per iteration.")
2783
+
2784
+ def add(self, g):
2785
+ r""" Add estimate ``g`` to the running average. """
2786
+ g = numpy.asarray(g)
2787
+ self.itn_results.append(g)
2788
+ if g.size > 1 and isinstance(g.flat[0], gvar.GVarRef):
2789
+ return
2790
+ g = g.reshape((-1,))
2791
+ if self.weighted:
2792
+ if not hasattr(self, '_rescale'):
2793
+ if self.rescale is not None:
2794
+ self._rescale = numpy.fabs(gvar.mean(g if self.rescale is True else self.rescale.flat[:]))
2795
+ gsdev = gvar.sdev(g)
2796
+ idx = gsdev > self._rescale
2797
+ self._rescale[idx] = gsdev[idx]
2798
+ self._rescale[self._rescale <= 0] = 1.
2799
+ else:
2800
+ self._rescale = 1.
2801
+ g = g / self._rescale
2802
+ gmean = gvar.mean(g)
2803
+ gcov = gvar.evalcov(g)
2804
+ for i in range(len(gcov)):
2805
+ if gcov[i,i] <= 0:
2806
+ gcov[i,i] = TINY
2807
+ self._mlist.append(gmean)
2808
+ self._wlist.append(self._w(gcov))
2809
+ invcov = numpy.sum([(w.T).dot(w) for w in self._wlist], axis=0)
2810
+ invw = self._w(invcov)
2811
+ cov = (invw.T).dot(invw)
2812
+ mean = 0.0
2813
+ for m, w in zip(self._mlist, self._wlist):
2814
+ for wj in w:
2815
+ wj_m = wj.dot(m)
2816
+ for invwi in invw:
2817
+ mean += invwi * invwi.dot(wj) * wj_m
2818
+ self[:] = (gvar.gvar(mean, cov) * self._rescale).reshape(self.shape)
2819
+ else:
2820
+ gmean = gvar.mean(g)
2821
+ gcov = gvar.evalcov(g)
2822
+ # idx = (gcov[numpy.diag_indices_from(gcov)] <= 0.0)
2823
+ # gcov[numpy.diag_indices_from(gcov)][idx] = TINY
2824
+ self._mlist.append(gmean)
2825
+ self._msum += gmean
2826
+ self._covsum += gcov
2827
+ self._invw = None
2828
+ self._n += 1
2829
+ mean = self._msum / self._n
2830
+ cov = self._covsum / (self._n ** 2)
2831
+ self[:] = gvar.gvar(mean, cov).reshape(self.shape)
2832
+
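+ # For two estimates with means m1, m2 and covariances C1, C2, the
+ # weighted branch above is mathematically equivalent to (a sketch):
+ #
+ # inv = numpy.linalg.inv
+ # cov = inv(inv(C1) + inv(C2))
+ # mean = cov @ (inv(C1) @ m1 + inv(C2) @ m2)
+ #
+ # but it is implemented with SVD decompositions (method ``_w``) to
+ # guard against (nearly) singular covariance matrices.
+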
2833
+ def summary(self, extended=False, weighted=None, rescale=None):
2834
+ r""" Assemble summary of results, iteration-by-iteration, into a string.
2835
+
2836
+ Args:
2837
+ extended (bool): Include a table of final averages for every
2838
+ component of the integrand if ``True``. Default is ``False``.
2839
+ weighted (bool): Display weighted averages of results from different
2840
+ iterations if ``True``; otherwise show unweighted averages.
2841
+ Default behavior is determined by |vegas|.
2842
+ """
2843
+ if weighted is None:
2844
+ weighted = self.weighted
2845
+ if rescale is None:
2846
+ rescale = self.rescale
2847
+ acc = RAvgArray(self.shape, weighted=weighted, rescale=rescale)
2848
+
2849
+ linedata = []
2850
+ for i, res in enumerate(self.itn_results):
2851
+ acc.add(res)
2852
+ if i > 0:
2853
+ chi2_dof = acc.chi2 / acc.dof
2854
+ Q = acc.Q
2855
+ else:
2856
+ chi2_dof = 0.0
2857
+ Q = 1.0
2858
+ itn = '%3d' % (i + 1)
2859
+ integral = '%-15s' % res.flat[0]
2860
+ wgtavg = '%-15s' % acc.flat[0]
2861
+ chi2dof = '%8.2f' % chi2_dof
2862
+ Q = '%8.2f' % Q
2863
+ linedata.append((itn, integral, wgtavg, chi2dof, Q))
2864
+ nchar = 5 * [0]
2865
+ for data in linedata:
2866
+ for i, d in enumerate(data):
2867
+ if len(d) > nchar[i]:
2868
+ nchar[i] = len(d)
2869
+ fmt = '%%%ds %%-%ds %%-%ds %%%ds %%%ds\n' % tuple(nchar)
2870
+ if weighted:
2871
+ ans = fmt % ('itn', 'integral', 'wgt average', 'chi2/dof', 'Q')
2872
+ else:
2873
+ ans = fmt % ('itn', 'integral', 'average', 'chi2/dof', 'Q')
2874
+ ans += len(ans[:-1]) * '-' + '\n'
2875
+ for data in linedata:
2876
+ ans += fmt % data
2877
+ if extended and self.itn_results[0].size > 1:
2878
+ ans += '\n' + gvar.tabulate(self) + '\n'
2879
+ return ans
2880
+
2881
+ ################
2882
+ # Classes that standardize the interface for integrands. Internally vegas
2883
+ # assumes batch integrands that take an array x[i,d] as argument and
2884
+ # return an array fx[i, d] where i = batch index and d = index over
2885
+ # dimensions or integrand components. VegasIntegrand figures out how
2886
+ # to convert the various types of integrand to this format. Integrands that
2887
+ # return scalars or arrays or dictionaries lead to integration results that
2888
+ # are scalars or arrays or dictionaries, respectively; VegasResult figures
2889
+ # out how to convert the 1-d array used internally in vegas into the
2890
+ # appropriate structure given the integrand structure.
2891
+
2892
+ cdef class VegasResult:
2893
+ cdef readonly object integrand
2894
+ cdef readonly object shape
2895
+ cdef readonly object result
2896
+ cdef readonly double sum_neval
2897
+ """ Accumulated result object --- standard interface for integration results.
2898
+
2899
+ Integrands are flattened into 2-d arrays in |vegas|. This object
2900
+ accumulates integration results from multiple iterations of |vegas|
2901
+ and can convert them to the original integrand format. It also counts
2902
+ the number of integrand evaluations used in all iterations and adds it to the
2903
+ result (``sum_neval``).
2904
+
2905
+ Args:
2906
+ integrand: :class:`VegasIntegrand` object.
2907
+ weighted (bool): use weighted average across iterations?
2908
+
2909
+ Attributes:
2910
+ shape: shape of integrand result or ``None`` if dictionary.
2911
+ result: accumulation of integral results. This is an object
2912
+ of type :class:`vegas.RAvgArray` for array-valued integrands,
2913
+ :class:`vegas.RAvgDict` for dictionary-valued integrands, and
2914
+ :class:`vegas.RAvg` for scalar-valued integrands.
2915
+ sum_neval: total number of integrand evaluations in all iterations.
2916
+ avg_neval: average number of integrand evaluations per iteration.
2917
+ """
2918
+ def __init__(self, integrand=None, weighted=None):
2919
+ self.integrand = integrand
2920
+ self.shape = integrand.shape
2921
+ self.sum_neval = 0
2922
+ if self.shape is None:
2923
+ self.result = RAvgDict(integrand.bdict, weighted=weighted)
2924
+ elif self.shape == ():
2925
+ self.result = RAvg(weighted=weighted)
2926
+ else:
2927
+ self.result = RAvgArray(self.shape, weighted=weighted)
2928
+
2929
+ def save(self, outfile):
2930
+ " pickle current results in ``outfile`` for later use. "
2931
+ if isinstance(outfile, str) or sys.version_info.major == 2:
2932
+ with open(outfile, 'wb') as ofile:
2933
+ pickle.dump(self.result, ofile)
2934
+ else:
2935
+ pickle.dump(self.result, outfile)
2936
+
2937
+ def saveall(self, integrator, outfile):
2938
+ " pickle current (results,integrator) in ``outfile`` for later use. "
2939
+ if isinstance(outfile, str) or sys.version_info.major == 2:
2940
+ with open(outfile, 'wb') as ofile:
2941
+ pickle.dump((self.result, integrator), ofile)
2942
+ else:
2943
+ pickle.dump((self.result, integrator), outfile)
2944
+
2945
+ def update(self, mean, var, last_neval=None):
2946
+ self.result.add(self.integrand.format_result(mean, var))
2947
+ if last_neval is not None:
2948
+ self.sum_neval += last_neval
2949
+ self.result.sum_neval = self.sum_neval
2950
+
2951
+ def update_analyzer(self, analyzer):
2952
+ r""" Update analyzer at end of an iteration. """
2953
+ analyzer.end(self.result.itn_results[-1], self.result)
2954
+
2955
+ def converged(self, rtol, atol):
2956
+ " Convergence test. "
2957
+ return self.result.converged(rtol, atol)
2958
+
2959
+ cdef class VegasIntegrand:
2960
+ cdef public object shape
2961
+ cdef public object fcntype
2962
+ cdef public Py_ssize_t size
2963
+ cdef public object eval
2964
+ cdef public object bdict
2965
+ cdef public int mpi_nproc # number of MPI processors
2966
+ cdef public int rank
2967
+ cdef public object comm
2968
+ """ Integand object --- standard interface for integrands
2969
+
2970
+ This class provides a standard interface for all |vegas| integrands.
2971
+ It analyzes the integrand to determine the shape of its output.
2972
+
2973
+ All integrands are converted to lbatch integrands. Method ``eval(x)``
2974
+ takes argument ``x[i,d]`` and returns ``fx[i,c]`` where ``i``
2975
+ is the batch index, ``d`` indexes different directions in x-space,
2976
+ and ``c`` indexes the different components of the integrand.
2977
+
2978
+ The integrands are configured for parallel processing
2979
+ using MPI (via :mod:`mpi4py`) if ``mpi=True``.
2980
+
2981
+ Args:
2982
+ fcn: Integrand function.
2983
+ map: Integrator's :class:`AdaptiveMap`.
2984
+ uses_jac: Determines whether or not function call receives the Jacobian.
2985
+ xsample: Random point from x-space (properly formatted as dict or array).
2986
+ mpi: ``True`` if mpi might be used; ``False`` (default) otherwise.
2987
+
2988
+ Attributes:
2989
+ eval: ``eval(x)`` returns ``fcn(x)`` repacked as a 2-d array.
2990
+ shape: Shape of integrand ``fcn(x)`` or ``None`` if it is a dictionary.
2991
+ size: Size of integrand.
2992
+ mpi_nproc: Number of MPI processors (=1 if no MPI)
2993
+ rank: MPI rank of this processor (=0 if no MPI)
2994
+ """
2995
+ def __init__(self, fcn, map, uses_jac, xsample, mpi):
2996
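+ # catch the common mistake of passing the integrand class itself
+ # rather than an instance of it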
+ if isinstance(fcn, type(LBatchIntegrand)) or isinstance(fcn, type(RBatchIntegrand)):
2997
+ raise ValueError(
2998
+ 'integrand given is a class, not an object -- need to initialize?'
2999
+ )
3000
+ if mpi:
3001
+ try:
3002
+ import mpi4py.MPI
3003
+ self.comm = mpi4py.MPI.COMM_WORLD
3004
+ self.rank = self.comm.Get_rank()
3005
+ self.mpi_nproc = self.comm.Get_size()
3006
+ except ImportError:
3007
+ self.mpi_nproc = 1
3008
+ else:
3009
+ self.mpi_nproc = 1
3010
+
3011
+ # configure using sample evaluation fcn(x) to
3012
+ # determine integrand shape
3013
+
3014
+ # sample x, jac
3015
+ xsample = gvar.mean(xsample)
3016
+ x0 = xsample
3017
+ if uses_jac:
3018
+ if xsample.shape is None:
3019
+ jac0 = gvar.BufferDict(xsample, buf=xsample.size * [1])
3020
+ else:
3021
+ jac0 = numpy.ones(xsample.shape, dtype=float)
3022
+ else:
3023
+ jac0 = None
3024
+
3025
+ # configure self.eval
3026
+ self.fcntype = getattr(fcn, 'fcntype', 'scalar')
3027
+ if self.fcntype == 'scalar':
3028
+ fx = fcn(x0, jac=jac0) if uses_jac else fcn(x0)
3029
+ if hasattr(fx, 'keys'):
3030
+ if not isinstance(fx, gvar.BufferDict):
3031
+ fx = gvar.BufferDict(fx)
3032
+ self.size = fx.size
3033
+ self.shape = None
3034
+ self.bdict = fx
3035
+ _eval = _BatchIntegrand_from_NonBatchDict(fcn, self.size, xsample)
3036
+ else:
3037
+ fx = numpy.asarray(fx)
3038
+ self.shape = fx.shape
3039
+ self.size = fx.size
3040
+ _eval = _BatchIntegrand_from_NonBatch(fcn, self.size, self.shape, xsample)
3041
+ elif self.fcntype == 'rbatch':
3042
+ if x0.shape is None:
3043
+ x0 = gvar.BufferDict(x0, rbatch_buf=x0.buf.reshape(x0.buf.shape + (1,)))
3044
+ if uses_jac:
3045
+ jac0 = gvar.BufferDict(jac0, rbatch_buf=jac0.buf.reshape(jac0.buf.shape + (1,)))
3046
+ else:
3047
+ x0 = x0.reshape(x0.shape + (1,))
3048
+ if uses_jac:
3049
+ jac0 = jac0.reshape(jac0.shape + (1,))
3050
+ fx = fcn(x0, jac=jac0) if uses_jac else fcn(x0)
3051
+ if hasattr(fx, 'keys'):
3052
+ # build dictionary for non-batch version of function
3053
+ fxs = gvar.BufferDict()
3054
+ for k in fx:
3055
+ fxs[k] = numpy.asarray(fx[k])[..., 0]
3056
+ self.shape = None
3057
+ self.bdict = fxs
3058
+ self.size = self.bdict.size
3059
+ _eval = _BatchIntegrand_from_BatchDict(fcn, self.bdict, rbatch=True, xsample=xsample)
3060
+ else:
3061
+ self.shape = numpy.shape(fx)[:-1]
3062
+ self.size = numpy.prod(self.shape, dtype=type(self.size))
3063
+ _eval = _BatchIntegrand_from_Batch(fcn, rbatch=True, xsample=xsample)
3064
+ else:
3065
+ if x0.shape is None:
3066
+ x0 = gvar.BufferDict(x0, lbatch_buf=x0.buf.reshape((1,) + x0.buf.shape))
3067
+ if uses_jac:
3068
+ jac0 = gvar.BufferDict(jac0, lbatch_buf=jac0.buf.reshape((1,) + jac0.buf.shape))
3069
+ else:
3070
+ x0 = x0.reshape((1,) + x0.shape)
3071
+ if uses_jac:
3072
+ jac0 = jac0.reshape((1,) + jac0.shape)
3073
+ fx = fcn(x0) if jac0 is None else fcn(x0, jac=jac0)
3074
+ if hasattr(fx, 'keys'):
3075
+ # build dictionary for non-batch version of function
3076
+ fxs = gvar.BufferDict()
3077
+ for k in fx:
3078
+ fxs[k] = fx[k][0]
3079
+ self.shape = None
3080
+ self.bdict = fxs
3081
+ self.size = self.bdict.size
3082
+ _eval = _BatchIntegrand_from_BatchDict(fcn, self.bdict, rbatch=False, xsample=xsample)
3083
+ else:
3084
+ fx = numpy.asarray(fx)
3085
+ self.shape = fx.shape[1:]
3086
+ self.size = numpy.prod(self.shape, dtype=type(self.size))
3087
+ _eval = _BatchIntegrand_from_Batch(fcn, rbatch=False, xsample=xsample)
3088
+ if self.mpi_nproc > 1:
3089
+ # MPI multiprocessor mode
3090
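+ # Each rank evaluates a contiguous slice of the batch; Allgather then
+ # collects all slices so every rank returns identical results. nx is
+ # rounded up, so the gathered array may be longer than the batch and
+ # is trimmed back to x.shape[0] at the end.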
+ def _mpi_eval(x, jac, self=self, _eval=_eval):
3091
+ nx = x.shape[0] // self.mpi_nproc + 1
3092
+ i0 = self.rank * nx
3093
+ i1 = min(i0 + nx, x.shape[0])
3094
+ f = numpy.empty((nx, self.size), float)
3095
+ if i1 > i0:
3096
+ # fill this rank's slice of f (empty when the slice starts past the end of the batch)
3097
+ if jac is None:
3098
+ f[:(i1-i0)] = _eval(x[i0:i1], jac=None)
3099
+ else:
3100
+ f[:(i1-i0)] = _eval(x[i0:i1], jac=jac[i0:i1])
3101
+ results = numpy.empty((self.mpi_nproc * nx, self.size), float)
3102
+ self.comm.Allgather(f, results)
3103
+ return results[:x.shape[0]]
3104
+ self.eval = _mpi_eval
3105
+ else:
3106
+ self.eval = _eval
3107
+
3108
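+ # _remove_gvars/_distribute_gvars support gvar's serialization
+ # protocol (used by gvar.dump/gvar.load) so that integrands
+ # containing GVars can be pickled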
+ def _remove_gvars(self, gvlist):
3109
+ tmp = copy.copy(self)
3110
+ tmp.eval = gvar.remove_gvars(tmp.eval, gvlist)
3111
+ return tmp
3112
+
3113
+ def _distribute_gvars(self, gvlist):
3114
+ self.eval = gvar.distribute_gvars(self.eval, gvlist)
3115
+
3116
+ def __call__(self, x, jac=None):
3117
+ r""" Non-batch version of fcn """
3118
+ # repack x as lbatch array and evaluate function via eval
3119
+ if hasattr(x, 'keys'):
3120
+ x = gvar.asbufferdict(x)
3121
+ x = x.buf.reshape(1, -1)
3122
+ else:
3123
+ x = numpy.asarray(x).reshape(1, -1)
3124
+ fx = self.eval(x, jac=jac)
3125
+ return self.format_result(fx)
3126
+
3127
+ def format_result(self, mean, var=None):
3128
+ r""" Reformat output from integrator to correspond to original output format """
3129
+ if var is None:
3130
+ # mean is an ndarray
3131
+ if self.shape is None:
3132
+ return gvar.BufferDict(self.bdict, buf=mean.reshape(-1))
3133
+ elif self.shape == ():
3134
+ return mean.flat[0]
3135
+ else:
3136
+ return mean.reshape(self.shape)
3137
+ else:
3138
+ # from Integrator.call
3139
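+ # var is either an array of variances (same shape as mean) or a full
+ # covariance matrix; gvar.gvar() expects standard deviations in the
+ # former case, hence the square root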
+ if var.shape == mean.shape:
3140
+ var = numpy.asarray(var) ** 0.5
3141
+
3142
+ if self.shape is None:
3143
+ return gvar.BufferDict(self.bdict, buf=gvar.gvar(mean, var).reshape(-1))
3144
+ elif self.shape == ():
3145
+ return gvar.gvar(mean[0], var[0,0] ** 0.5 if var.shape != mean.shape else var[0])
3146
+ else:
3147
+ return gvar.gvar(mean, var).reshape(self.shape)
3148
+
3149
+ def format_evalx(self, evalx):
3150
+ r""" Reformat output from eval(x).
3151
+
3152
+ ``self.eval(x)`` returns an array ``evalx[i,d]`` where ``i`` is the batch index and ``d``
3153
+ labels different components of the ``self.fcn`` output. ``self.format_evalx(evalx)``
3154
+ reformats that output into a dictionary or array corresponding to the original output
3155
+ from ``self.fcn``.
3156
+ """
3157
+ if self.shape is None:
3158
+ return gvar.BufferDict(self.bdict, lbatch_buf=evalx)
3159
+ else:
3160
+ return evalx.reshape(evalx.shape[:1] + self.shape)
3161
+
3162
+ def training(self, x, jac):
3163
+ r""" Calculate first element of integrand at point ``x``. """
3164
+ fx = self.eval(x, jac=jac)
3165
+ if fx.ndim == 1:
3166
+ return fx
3167
+ else:
3168
+ fx = fx.reshape((x.shape[0], -1))
3169
+ return fx[:, 0]
3170
+
3171
+ # The _BatchIntegrand_from_XXXX objects are used by VegasIntegrand
3172
+ # to convert different types of integrand (i.e., scalar vs array vs dict,
3173
+ # and nonbatch vs batch) to the standard output format assumed internally
3174
+ # in vegas.
3175
+ cdef class _BatchIntegrand_from_Base(object):
3176
+ cdef readonly object xsample
3177
+ cdef readonly bint dict_arg
3178
+ cdef readonly bint std_arg
3179
+ cdef public object fcn
3180
+ """ Base class for following classes -- manages xsample """
3181
+
3182
+ def __init__(self, fcn, xsample):
3183
+ self.fcn = fcn
3184
+ self.xsample = xsample
3185
+ if xsample.shape is None:
3186
+ self.dict_arg = True
3187
+ self.std_arg = False
3188
+ else:
3189
+ self.dict_arg = False
3190
+ self.std_arg = (len(xsample.shape) == 1)
3191
+
3192
+ def _remove_gvars(self, gvlist):
3193
+ tmp = copy.copy(self)
3194
+ tmp.fcn = gvar.remove_gvars(tmp.fcn, gvlist)
3195
+ return tmp
3196
+
3197
+ def _distribute_gvars(self, gvlist):
3198
+ self.fcn = gvar.distribute_gvars(self.fcn, gvlist)
3199
+
3200
+ def non_std_arg_fcn(self, x, jac=None):
3201
+ " fcn(x) for non-standard non-batch functions "
3202
+ x = numpy.asarray(x)
3203
+ if self.dict_arg:
3204
+ xd = gvar.BufferDict(self.xsample, buf=x)
3205
+ if jac is not None:
3206
+ jacd = gvar.BufferDict(self.xsample, buf=jac)
3207
+ return self.fcn(xd, jacd)
3208
+ else:
3209
+ return self.fcn(xd)
3210
+ elif jac is None:
3211
+ return self.fcn(x.reshape(self.xsample.shape))
3212
+ else:
3213
+ return self.fcn(x.reshape(self.xsample.shape), numpy.asarray(jac).reshape(self.xsample.shape))
3214
+
3215
+ def non_std_arg_batch_fcn(self, x, jac=None):
3216
+ " fcn(x) for non-standard batch functions "
3217
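+ # repack the internal lbatch array x[i, d] into the format the user's
+ # batch function expects: a BufferDict for dict-valued x-space, or an
+ # array with the user's x shape; for rbatch functions the batch index
+ # is moved to the right via a transpose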
+ x = numpy.asarray(x)
3218
+ if self.dict_arg:
3219
+ if self.rbatch:
3220
+ xd = gvar.BufferDict(self.xsample, rbatch_buf=x.T)
3221
+ if jac is not None:
3222
+ jac = gvar.BufferDict(self.xsample, rbatch_buf=jac.T)
3223
+ else:
3224
+ xd = gvar.BufferDict(self.xsample, lbatch_buf=x)
3225
+ if jac is not None:
3226
+ jac = gvar.BufferDict(self.xsample, lbatch_buf=jac)
3227
+ return self.fcn(xd) if jac is None else self.fcn(xd, jac=jac)
3228
+ else:
3229
+ if self.rbatch:
3230
+ sh = self.xsample.shape + (-1,)
3231
+ return self.fcn(x.T.reshape(sh)) if jac is None else self.fcn(x.T.reshape(sh), jac=jac.T.reshape(sh))
3232
+ else:
3233
+ sh = (-1,) + self.xsample.shape
3234
+ return self.fcn(x.reshape(sh)) if jac is None else self.fcn(x.reshape(sh), jac=jac.reshape(sh))
3235
+
3236
+ cdef class _BatchIntegrand_from_NonBatch(_BatchIntegrand_from_Base):
3237
+ cdef readonly Py_ssize_t size
3238
+ cdef readonly object shape
3239
+ """ Batch integrand from non-batch integrand. """
3240
+
3241
+ def __init__(self, fcn, size, shape, xsample):
3242
+ self.size = size
3243
+ self.shape = shape
3244
+ super(_BatchIntegrand_from_NonBatch, self).__init__(fcn, xsample)
3245
+
3246
+ def __call__(self, double[:, :] x, jac=None):
3247
+ cdef Py_ssize_t i, j
3248
+ cdef double[:, ::1] f
3249
+ cdef const double[:] fx
3250
+ _f = numpy.empty(
3251
+ (x.shape[0], self.size), float
3252
+ )
3253
+ f = _f
3254
+ if self.shape == ():
3255
+ # very common special case
3256
+ for i in range(x.shape[0]):
3257
+ if self.std_arg:
3258
+ f[i, 0] = self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
3259
+ else:
3260
+ f[i, 0] = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
3261
+ else:
3262
+ for i in range(x.shape[0]):
3263
+ if self.std_arg:
3264
+ fx = numpy.asarray(
3265
+ self.fcn(numpy.asarray(x[i])) if jac is None else self.fcn(numpy.asarray(x[i]), jac=jac[i])
3266
+ ).reshape((-1,))
3267
+ else:
3268
+ fx = numpy.asarray(
3269
+ self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
3270
+ ).reshape((-1,))
3271
+ for j in range(len(fx)):
3272
+ f[i, j] = fx[j]
3273
+ return _f
3274
+
3275
+ cdef class _BatchIntegrand_from_NonBatchDict(_BatchIntegrand_from_Base):
3276
+ cdef readonly Py_ssize_t size
3277
+ """ Batch integrand from non-batch dict-integrand. """
3278
+
3279
+ def __init__(self, fcn, size, xsample=None):
3280
+ self.size = size
3281
+ super(_BatchIntegrand_from_NonBatchDict, self).__init__(fcn, xsample)
3282
+
3283
+ def __call__(self, double[:, :] x, jac=None):
3284
+ cdef Py_ssize_t i, j
3285
+ cdef double[:, ::1] f
3286
+ _f = numpy.empty(
3287
+ (x.shape[0], self.size), float
3288
+ )
3289
+ f = _f
3290
+ for i in range(x.shape[0]):
3291
+ if self.std_arg:
3292
+ fx = self.fcn(numpy.asarray(x[i])) if jac is None else self.fcn(numpy.asarray(x[i]), jac=jac[i])
3293
+ else:
3294
+ fx = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
3295
+ if not isinstance(fx, gvar.BufferDict):
3296
+ fx = gvar.BufferDict(fx)
3297
+ for j in range(f.shape[1]):
3298
+ f[i, j] = fx.buf[j]
3299
+ return _f
3300
+
3301
+ cdef class _BatchIntegrand_from_Batch(_BatchIntegrand_from_Base):
3302
+ cdef readonly object shape
3303
+ cdef readonly bint rbatch
3304
+ """ batch integrand from batch function. """
3305
+
3306
+ def __init__(self, fcn, rbatch=False, xsample=None):
3307
+ self.rbatch = rbatch
3308
+ super(_BatchIntegrand_from_Batch, self).__init__(fcn, xsample)
3309
+
3310
+ def __call__(self, x, jac=None):
3311
+ # call fcn(x)
3312
+ if self.std_arg:
3313
+ if self.rbatch:
3314
+ fx = self.fcn(x.T) if jac is None else self.fcn(x.T, jac=jac.T)
3315
+ else:
3316
+ fx = self.fcn(x) if jac is None else self.fcn(x, jac=jac)
3317
+ else:
3318
+ fx = self.non_std_arg_batch_fcn(x, jac)
3319
+
3320
+ # process result
3321
+ if self.rbatch:
3322
+ # fx = self.fcn(x.T) if jac is None else self.fcn(x, jac=jac.T)
3323
+ if not isinstance(fx, numpy.ndarray):
3324
+ fx = numpy.asarray(fx)
3325
+ fx = fx.reshape((-1, x.shape[0]))
3326
+ return numpy.ascontiguousarray(fx.T)
3327
+ else:
3328
+ # fx = self.fcn(x) if jac is None else self.fcn(x, jac=jac)
3329
+ if not isinstance(fx, numpy.ndarray):
3330
+ fx = numpy.asarray(fx)
3331
+ return fx.reshape((x.shape[0], -1))
3332
+
3333
+
3334
+ cdef class _BatchIntegrand_from_BatchDict(_BatchIntegrand_from_Base):
3335
+ cdef readonly Py_ssize_t size
3336
+ cdef readonly object slice
3337
+ cdef readonly object shape
3338
+ cdef readonly bint rbatch
3339
+ """ batch integrand from batch dict-integrand. """
3340
+
3341
+ def __init__(self, fcn, bdict, rbatch=False, xsample=None):
3342
+ self.size = bdict.size
3343
+ self.rbatch = rbatch
3344
+ self.slice = collections.OrderedDict()
3345
+ self.shape = collections.OrderedDict()
3346
+ for k in bdict:
3347
+ self.slice[k], self.shape[k] = bdict.slice_shape(k)
3348
+ super(_BatchIntegrand_from_BatchDict, self).__init__(fcn, xsample)
3349
+
3350
+ def __call__(self, x, jac=None):
3351
+ cdef Py_ssize_t i
3352
+ # cdef double[:, ::1] buf
3353
+ buf = numpy.empty(
3354
+ (x.shape[0], self.size), float
3355
+ )
3356
+ # buf = _buf
3357
+ # call fcn(x)
3358
+ if self.std_arg:
3359
+ if self.rbatch:
3360
+ fx = self.fcn(x.T) if jac is None else self.fcn(x.T, jac=jac.T)
3361
+ else:
3362
+ fx = self.fcn(x) if jac is None else self.fcn(x, jac=jac)
3363
+ else:
3364
+ fx = self.non_std_arg_batch_fcn(x, jac)
3365
+
3366
+ # process result
3367
+ if self.rbatch:
3368
+ # fx = self.fcn(x.T) if jac is None else self.fcn(x.T, jac=jac.T)
3369
+ for k in self.slice:
3370
+ buf[:, self.slice[k]] = (
3371
+ fx[k]
3372
+ if self.shape[k] == () else
3373
+ numpy.reshape(fx[k], (-1, x.shape[0])).T
3374
+ )
3375
+ else:
3376
+ # fx = self.fcn(x) if jac is None else self.fcn(x, jac=jac)
3377
+ for k in self.slice:
3378
+ buf[:, self.slice[k]] = (
3379
+ fx[k]
3380
+ if self.shape[k] == () else
3381
+ numpy.asarray(fx[k]).reshape((x.shape[0], -1))
3382
+ )
3383
+ return buf
3384
+
3385
+ # LBatchIntegrand and RBatchIntegrand are container classes for batch integrands.
3386
+ cdef class LBatchIntegrand(object):
3387
+ r""" Wrapper for lbatch integrands.
3388
+
3389
+ Used by :func:`vegas.lbatchintegrand`.
3390
+
3391
+ :class:`vegas.LBatchIntegrand` is the same as
3392
+ :class:`vegas.BatchIntegrand`.
3393
+ """
3394
+ # cdef public object fcn
3395
+ def __init__(self, fcn=None):
3396
+ self.fcn = self if fcn is None else fcn
3397
+
3398
+ property fcntype:
3399
+ def __get__(self):
3400
+ return 'lbatch'
3401
+
3402
+ def __call__(self, *args, **kargs):
3403
+ return self.fcn(*args, **kargs)
3404
+
3405
+ def __getattr__(self, attr):
3406
+ return getattr(self.fcn, attr)
3407
+
3408
+ def lbatchintegrand(f):
3409
+ r""" Decorator for batch integrand functions.
3410
+
3411
+ Applying :func:`vegas.lbatchintegrand` to a function ``fcn`` repackages
3412
+ the function in a format that |vegas| can understand. Appropriate
3413
+ functions take a :mod:`numpy` array of integration points ``x[i, d]``
3414
+ as an argument, where ``i=0...`` labels the integration point and
3415
+ ``d=0...`` labels direction, and return an array ``f[i]`` of
3416
+ integrand values (or arrays of integrand values) for the corresponding
3417
+ points. The meaning of ``fcn(x)`` is unchanged by the decorator.
3418
+
3419
+ An example is ::
3420
+
3421
+ import vegas
3422
+ import numpy as np
3423
+
3424
+ @vegas.lbatchintegrand # or @vegas.batchintegrand
3425
+ def f(x):
3426
+ return np.exp(-x[:, 0] - x[:, 1])
3427
+
3428
+ for the two-dimensional integrand :math:`\exp(-x_0 - x_1)`.
3429
+ When integrands have dictionary arguments ``xd``, each element of the
3430
+ dictionary has an extra index (on the left): ``xd[key][:, ...]``.
3431
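+
+ For instance (sketch --- the keys ``a`` and ``b`` are hypothetical)::
+
+ @vegas.lbatchintegrand
+ def g(xd):
+ # batch index on the left of each dictionary element
+ return np.exp(-xd['a'] - xd['b'])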
+
3432
+ :func:`vegas.batchintegrand` is the same as :func:`vegas.lbatchintegrand`.
3433
+ """
3434
+ try:
3435
+ f.fcntype = 'lbatch'
3436
+ return f
3437
+ except AttributeError:
3438
+ return LBatchIntegrand(f)
3439
+
3440
+ cdef class RBatchIntegrand(object):
3441
+ r""" Same as :class:`vegas.LBatchIntegrand` but with batch indices on the right (not left). """
3442
+ # cdef public object fcn
3443
+ def __init__(self, fcn=None):
3444
+ self.fcn = self if fcn is None else fcn
3445
+
3446
+ property fcntype:
3447
+ def __get__(self):
3448
+ return 'rbatch'
3449
+
3450
+ def __call__(self, *args, **kargs):
3451
+ return self.fcn(*args, **kargs)
3452
+
3453
+ def __getattr__(self, attr):
3454
+ return getattr(self.fcn, attr)
3455
+
3456
+
3457
+ def rbatchintegrand(f):
3458
+ r""" Same as :func:`vegas.lbatchintegrand` but with batch indices on the right (not left). """
3459
+ try:
3460
+ f.fcntype = 'rbatch'
3461
+ return f
3462
+ except AttributeError:
3463
+ return RBatchIntegrand(f)
3464
+
3465
+ # legacy names
3466
+ batchintegrand = lbatchintegrand
3467
+ BatchIntegrand = LBatchIntegrand
3468
+
3469
+ vecintegrand = batchintegrand
3470
+ MPIintegrand = batchintegrand
3471
+
3472
+ class VecIntegrand(LBatchIntegrand):
3473
+ pass