vegas 6.2.1__cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vegas might be problematic.

vegas/_vegas.pyx ADDED
@@ -0,0 +1,3404 @@
1
+ # cython: language_level=3str, binding=True, boundscheck=False
2
+ # c#ython: profile=True
3
+
4
+ # Created by G. Peter Lepage (Cornell University) in 12/2013.
5
+ # Copyright (c) 2013-25 G. Peter Lepage.
6
+ #
7
+ # This program is free software: you can redistribute it and/or modify
8
+ # it under the terms of the GNU General Public License as published by
9
+ # the Free Software Foundation, either version 3 of the License, or
10
+ # any later version (see <http://www.gnu.org/licenses/>).
11
+ #
12
+ # This program is distributed in the hope that it will be useful,
13
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
14
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15
+ # GNU General Public License for more details.
16
+
17
+ from libc.math cimport floor, log, abs, tanh, erf, exp, sqrt
18
+
19
+ import collections
20
+ import copy
21
+ import inspect
22
+ import math
23
+ import multiprocessing
24
+ import pickle
25
+ import os
26
+ import sys
27
+ import tempfile
28
+ import time
29
+ import warnings
30
+
31
+ import numpy
32
+ import gvar
33
+
34
+ cdef double TINY = 10 ** (sys.float_info.min_10_exp + 50) # smallest and biggest
35
+ cdef double HUGE = 10 ** (sys.float_info.max_10_exp - 50) # with extra headroom
36
+ cdef double EPSILON = sys.float_info.epsilon * 1e4 # roundoff error threshold (see Schubert and Gertz Table 2)
37
+
38
+ # AdaptiveMap is used by Integrator
39
+ cdef class AdaptiveMap:
40
+ r""" Adaptive map ``y->x(y)`` for multidimensional ``y`` and ``x``.
41
+
42
+ An :class:`AdaptiveMap` defines a multidimensional map ``y -> x(y)``
43
+ from the unit hypercube, with ``0 <= y[d] <= 1``, to an arbitrary
44
+ hypercube in ``x`` space. Each direction is mapped independently
45
+ with a Jacobian that is tunable (i.e., "adaptive").
46
+
47
+ The map is specified by a grid in ``x``-space that, by definition,
48
+ maps into a uniformly spaced grid in ``y``-space. The nodes of
49
+ the grid are specified by ``grid[d, i]`` where d is the
50
+ direction (``d=0,1...dim-1``) and ``i`` labels the grid point
51
+ (``i=0,1...N``). The mapping for a specific point ``y`` into
52
+ ``x`` space is::
53
+
54
+ y[d] -> x[d] = grid[d, i(y[d])] + inc[d, i(y[d])] * delta(y[d])
55
+
56
+ where ``i(y)=floor(y*N``), ``delta(y)=y*N - i(y)``, and
57
+ ``inc[d, i] = grid[d, i+1] - grid[d, i]``. The Jacobian for this map, ::
58
+
59
+ dx[d]/dy[d] = inc[d, i(y[d])] * N,
60
+
61
+ is piece-wise constant and proportional to the ``x``-space grid
62
+ spacing. Each increment in the ``x``-space grid maps into an increment of
63
+ size ``1/N`` in the corresponding ``y`` space. So regions in
64
+ ``x`` space where ``inc[d, i]`` is small are stretched out
65
+ in ``y`` space, while larger increments are compressed.
66
+
67
+ The ``x`` grid for an :class:`AdaptiveMap` can be specified explicitly
68
+ when the map is created: for example, ::
69
+
70
+ m = AdaptiveMap([[0, 0.1, 1], [-1, 0, 1]])
71
+
72
+ creates a two-dimensional map where the ``x[0]`` interval ``(0,0.1)``
73
+ and ``(0.1,1)`` map into the ``y[0]`` intervals ``(0,0.5)`` and
74
+ ``(0.5,1)`` respectively, while ``x[1]`` intervals ``(-1,0)``
75
+ and ``(0,1)`` map into ``y[1]`` intervals ``(0,0.5)`` and ``(0.5,1)``.
76
+
77
+ More typically, a uniform map with ``ninc`` increments
78
+ is first created: for example, ::
79
+
80
+ m = AdaptiveMap([[0, 1], [-1, 1]], ninc=1000)
81
+
82
+ creates a two-dimensional grid, with 1000 increments in each direction,
83
+ that spans the volume ``0<=x[0]<=1``, ``-1<=x[1]<=1``. This map is then
84
+ trained with data ``f[j]`` corresponding to ``ny`` points ``y[j, d]``,
85
+ with ``j=0...ny-1``, (usually) uniformly distributed in |y| space:
86
+ for example, ::
87
+
88
+ m.add_training_data(y, f)
89
+ m.adapt(alpha=1.5)
90
+
91
+ ``m.adapt(alpha=1.5)`` shrinks grid increments where ``f[j]``
92
+ is large, and expands them where ``f[j]`` is small. Usually
93
+ one has to iterate over several sets of ``y``\s and ``f``\s
94
+ before the grid has fully adapted.
95
+
96
+ The speed with which the grid adapts is determined by parameter ``alpha``.
97
+ Large (positive) values imply rapid adaptation, while small values (much
98
+ less than one) imply slow adaptation. As in any iterative process that
99
+ involves random numbers, it is usually a good idea to slow adaptation
100
+ down in order to avoid instabilities caused by random fluctuations.
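+
+ For example, the following is a minimal (illustrative) training
+ loop; the integrand ``fcn`` is invented for the example::
+
+     fcn = lambda x: numpy.exp(-100. * (x[:, 0] - 0.5) ** 2)
+     m = AdaptiveMap([[0, 1]], ninc=100)
+     for _ in range(5):
+         y = gvar.RNG.random((1000, 1))      # uniform in y space
+         x = numpy.empty(y.shape, float)
+         jac = numpy.empty(y.shape[0], float)
+         m.map(y, x, jac)                    # y -> x and dx/dy
+         m.add_training_data(y, (jac * fcn(x)) ** 2)
+         m.adapt(alpha=1.5)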
101
+
102
+ Args:
103
+ grid (list of arrays): Initial ``x`` grid, where ``grid[d][i]``
104
+ is the ``i``-th node in direction ``d``. Different directions
105
+ can have different numbers of nodes.
106
+ ninc (int or array or ``None``): ``ninc[d]`` (or ``ninc``, if it
107
+ is a number) is the number of increments along direction ``d``
108
+ in the new ``x`` grid. The new grid is designed to give the same
109
+ Jacobian ``dx(y)/dy`` as the original grid. The default value,
110
+ ``ninc=None``, leaves the grid unchanged.
111
+ """
112
+ def __init__(self, grid, ninc=None):
113
+ cdef Py_ssize_t i, d, dim
114
+ cdef double griddi
115
+ if isinstance(grid, AdaptiveMap):
116
+ self.ninc = numpy.array(grid.ninc)
117
+ self.inc = numpy.array(grid.inc)
118
+ self.grid = numpy.array(grid.grid)
119
+ else:
120
+ dim = len(grid)
121
+ len_g = numpy.array([len(x) for x in grid], dtype=numpy.intp)
122
+ if min(len_g) < 2:
123
+ raise ValueError('grid[d] must have at least 2 elements, not {}'.format(min(len_g)))
124
+ self.ninc = len_g - 1
125
+ self.inc = numpy.empty((dim, max(len_g)-1), float)
126
+ self.grid = numpy.empty((dim, self.inc.shape[1] +1), float)
127
+ for d in range(dim):
128
+ for i, griddi in enumerate(sorted(grid[d])):
129
+ self.grid[d, i] = griddi
130
+ for i in range(len_g[d] - 1):
131
+ self.inc[d, i] = self.grid[d, i + 1] - self.grid[d, i]
132
+ self.clear()
133
+ if ninc is not None and not numpy.all(ninc == self.ninc):
134
+ if numpy.all(numpy.asarray(self.ninc) == 1):
135
+ self.make_uniform(ninc=ninc)
136
+ else:
137
+ self.adapt(ninc=ninc)
138
+
139
+ property dim:
140
+ " Number of dimensions."
141
+ def __get__(self):
142
+ return self.grid.shape[0]
143
+
144
+ def region(self, Py_ssize_t d=-1):
145
+ r""" x-space region.
146
+
147
+ ``region(d)`` returns a tuple ``(xl,xu)`` specifying the ``x``-space
148
+ interval covered by the map in direction ``d``. A list containing
149
+ the intervals for each direction is returned if ``d`` is omitted.
150
+ """
151
+ if d < 0:
152
+ return [self.region(d) for d in range(self.dim)]
153
+ else:
154
+ return (self.grid[d, 0], self.grid[d, self.ninc[d]])
155
+
156
+ def extract_grid(self):
157
+ " Return a list of lists specifying the map's grid. "
158
+ cdef Py_ssize_t d
159
+ grid = []
160
+ for d in range(self.dim):
161
+ ng = self.ninc[d] + 1
162
+ grid.append(list(self.grid[d, :ng]))
163
+ return grid
164
+
165
+ def __reduce__(self):
166
+ r""" Capture state for pickling. """
167
+ return (AdaptiveMap, (self.extract_grid(),))
168
+
169
+ def settings(self, ngrid=5):
170
+ r""" Create string with information about grid nodes.
171
+
172
+ Creates a string containing the locations of the nodes
173
+ in the map grid for each direction. Parameter
174
+ ``ngrid`` specifies the maximum number of nodes to print
175
+ (spread evenly over the grid).
176
+ """
177
+ cdef Py_ssize_t d
178
+ ans = []
179
+ if ngrid > 0:
180
+ for d in range(self.dim):
181
+ grid_d = numpy.array(self.grid[d, :self.ninc[d] + 1])
182
+ nskip = int(self.ninc[d] // ngrid)
183
+ if nskip<1:
184
+ nskip = 1
185
+ start = nskip // 2
186
+ ans += [
187
+ " grid[%2d] = %s"
188
+ % (
189
+ d,
190
+ numpy.array2string(
191
+ grid_d[start::nskip], precision=3,
192
+ prefix=' grid[xx] = ')
193
+ )
194
+ ]
195
+ return '\n'.join(ans) + '\n'
196
+
197
+ def random(self, n=None):
198
+ " Create ``n`` random points in |y| space. "
199
+ if n is None:
200
+ y = gvar.RNG.random(self.dim)
201
+ else:
202
+ y = gvar.RNG.random((n, self.dim))
203
+ return self(y)
204
+
205
+ def make_uniform(self, ninc=None):
206
+ r""" Replace the grid with a uniform grid.
207
+
208
+ The new grid has ``ninc[d]`` (or ``ninc``, if it is a number)
209
+ increments along each direction if ``ninc`` is specified.
210
+ If ``ninc=None`` (default), the new grid has the same number
211
+ of increments in each direction as the old grid.
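+
+ For example (an illustrative sketch), ::
+
+     m.make_uniform(ninc=100)
+
+ replaces ``m``'s grid with a uniform grid having 100 increments
+ in each direction.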
212
+ """
213
+ cdef Py_ssize_t i, d
214
+ cdef Py_ssize_t dim = self.grid.shape[0]
215
+ cdef double[:] tmp
216
+ cdef double[:, ::1] new_grid
217
+ if ninc is None:
218
+ ninc = numpy.asarray(self.ninc)
219
+ elif numpy.shape(ninc) == ():
220
+ ninc = numpy.full(self.dim, int(ninc), dtype=numpy.intp)
221
+ elif numpy.shape(ninc) == (self.dim,):
222
+ ninc = numpy.asarray(ninc)
223
+ else:
224
+ raise ValueError('ninc has wrong shape -- {}'.format(numpy.shape(ninc)))
225
+ if min(ninc) < 1:
226
+ raise ValueError(
227
+ "no of increments < 1 in AdaptiveMap -- %s"
228
+ % str(ninc)
229
+ )
230
+ new_inc = numpy.empty((dim, max(ninc)), float)
231
+ new_grid = numpy.empty((dim, new_inc.shape[1] + 1), float)
232
+ for d in range(dim):
233
+ tmp = numpy.linspace(self.grid[d, 0], self.grid[d, self.ninc[d]], ninc[d] + 1)
234
+ for i in range(ninc[d] + 1):
235
+ new_grid[d, i] = tmp[i]
236
+ for i in range(ninc[d]):
237
+ new_inc[d, i] = new_grid[d, i + 1] - new_grid[d, i]
238
+ self.ninc = ninc
239
+ self.grid = new_grid
240
+ self.inc = new_inc
241
+ self.clear()
242
+
243
+ def __call__(self, y=None):
244
+ r""" Return ``x`` values corresponding to ``y``.
245
+
246
+ ``y`` can be a single ``dim``-dimensional point, or it
247
+ can be an array ``y[i,j, ..., d]`` of such points (``d=0..dim-1``).
248
+
249
+ If ``y=None`` (default), ``y`` is set equal to a (uniform) random point
250
+ in the volume.
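+
+ For example (an illustrative sketch)::
+
+     m = AdaptiveMap([[0, 1], [-1, 1]], ninc=10)
+     y = numpy.array([[0.25, 0.5], [0.75, 0.9]])
+     x = m(y)        # x[j, d] corresponding to y[j, d]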
251
+ """
252
+
253
+ if y is None:
254
+ y = gvar.RNG.random(size=self.dim)
255
+ else:
256
+ y = numpy.asarray(y, float)
257
+ y_shape = y.shape
258
+ y.shape = -1, y.shape[-1]
259
+ x = 0 * y
260
+ jac = numpy.empty(y.shape[0], float)
261
+ self.map(y, x, jac)
262
+ x.shape = y_shape
263
+ return x
264
+
265
+ def jac1d(self, y):
266
+ r""" Return the map's Jacobian at ``y`` for each direction.
267
+
268
+ ``y`` can be a single ``dim``-dimensional point, or it
269
+ can be an array ``y[i,j,...,d]`` of such points (``d=0..dim-1``).
270
+ Returns an array ``jac`` where ``jac[i,j,...,d]`` is the
271
+ (one-dimensional) Jacobian (``dx[d]/dy[d]``) corresponding
272
+ to ``y[i,j,...,d]``.
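+
+ For example (an illustrative sketch)::
+
+     m = AdaptiveMap([[0, 2]], ninc=2)
+     jac = m.jac1d(numpy.array([[0.25], [0.75]]))   # dx[0]/dy[0] at each point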
273
+ """
274
+ cdef Py_ssize_t dim = self.grid.shape[0]
275
+ cdef Py_ssize_t i, d, ninc, ny, iy
276
+ cdef double y_ninc, dy_ninc
277
+ cdef double[:,::1] jac
278
+ y = numpy.asarray(y)
279
+ y_shape = y.shape
280
+ y.shape = -1, y.shape[-1]
281
+ ny = y.shape[0]
282
+ jac = numpy.empty(y.shape, float)
283
+ for i in range(ny):
284
+ for d in range(dim):
285
+ ninc = self.ninc[d]
286
+ y_ninc = y[i, d] * ninc
287
+ iy = <int>floor(y_ninc)
288
+ dy_ninc = y_ninc - iy
289
+ if iy < ninc:
290
+ jac[i, d] = self.inc[d, iy] * ninc
291
+ else:
292
+ jac[i, d] = self.inc[d, ninc - 1] * ninc
293
+ ans = numpy.asarray(jac)
294
+ ans.shape = y_shape
295
+ return ans
296
+
297
+ def jac(self, y):
298
+ r""" Return the map's Jacobian at ``y``.
299
+
300
+ ``y`` can be a single ``dim``-dimensional point, or it
301
+ can be an array ``y[i,j,...,d]`` of such points (``d=0..dim-1``).
302
+ Returns an array ``jac`` where ``jac[i,j,...]`` is the
303
+ (multidimensional) Jacobian (``dx/dy``) corresponding
304
+ to ``y[i,j,...]``.
305
+ """
306
+ return numpy.prod(self.jac1d(y), axis=-1)
307
+
308
+ # @cython.boundscheck(False)
309
+ # @cython.wraparound(False)
310
+ cpdef map(
311
+ self,
312
+ double[:, ::1] y,
313
+ double[:, ::1] x,
314
+ double[::1] jac,
315
+ Py_ssize_t ny=-1
316
+ ):
317
+ r""" Map y to x, where jac is the Jacobian (``dx/dy``).
318
+
319
+ ``y[j, d]`` is an array of ``ny`` ``y``-values for direction ``d``.
320
+ ``x[j, d]`` is filled with the corresponding ``x`` values,
321
+ and ``jac[j]`` is filled with the corresponding Jacobian
322
+ values. ``x`` and ``jac`` must be preallocated: for example, ::
323
+
324
+ x = numpy.empty(y.shape, float)
325
+ jac = numpy.empty(y.shape[0], float)
326
+
327
+ Args:
328
+ y (array): ``y`` values to be mapped. ``y`` is a contiguous
329
+ 2-d array, where ``y[j, d]`` contains values for points
330
+ along direction ``d``.
331
+ x (array): Container for ``x[j, d]`` values corresponding
332
+ to ``y[j, d]``. Must be a contiguous 2-d array.
333
+ jac (array): Container for Jacobian values ``jac[j]`` (``= dx/dy``)
334
+ corresponding to ``y[j, d]``. Must be a contiguous 1-d array.
335
+ ny (int): Number of ``y`` points: ``y[j, d]`` for ``d=0...dim-1``
336
+ and ``j=0...ny-1``. ``ny`` is set to ``y.shape[0]`` if it is
337
+ omitted (or negative).
338
+ """
339
+ cdef Py_ssize_t ninc
340
+ cdef Py_ssize_t dim = self.inc.shape[0]
341
+ cdef Py_ssize_t i, iy, d
342
+ cdef double y_ninc, dy_ninc, tmp_jac
343
+ if ny < 0:
344
+ ny = y.shape[0]
345
+ elif ny > y.shape[0]:
346
+ raise ValueError('ny > y.shape[0]: %d > %d' % (ny, y.shape[0]))
347
+ for i in range(ny):
348
+ jac[i] = 1.
349
+ for d in range(dim):
350
+ ninc = self.ninc[d]
351
+ y_ninc = y[i, d] * ninc
352
+ iy = <int>floor(y_ninc)
353
+ dy_ninc = y_ninc - iy
354
+ if iy < ninc:
355
+ x[i, d] = self.grid[d, iy] + self.inc[d, iy] * dy_ninc
356
+ jac[i] *= self.inc[d, iy] * ninc
357
+ else:
358
+ x[i, d] = self.grid[d, ninc]
359
+ jac[i] *= self.inc[d, ninc - 1] * ninc
360
+ return
361
+
362
+ cpdef invmap(
363
+ self,
364
+ double[:, ::1] x,
365
+ double[:, ::1] y,
366
+ double[::1] jac,
367
+ Py_ssize_t nx=-1
368
+ ):
369
+ r""" Map x to y, where jac is the Jacobian (``dx/dy``).
370
+
371
+ ``x[j, d]`` is an array of ``nx`` ``x``-values for direction ``d``.
372
+ ``y[j, d]`` is filled with the corresponding ``y`` values,
373
+ and ``jac[j]`` is filled with the corresponding Jacobian
374
+ values. ``y`` and ``jac`` must be preallocated: for example, ::
375
+
376
+ y = numpy.empty(x.shape, float)
377
+ jac = numpy.empty(x.shape[0], float)
378
+
379
+ Args:
380
+ x (array): ``x`` values to be mapped to ``y``-space. ``x``
381
+ is a contiguous 2-d array, where ``x[j, d]`` contains
382
+ values for points along direction ``d``.
383
+ y (array): Container for ``y[j, d]`` values corresponding
384
+ to ``x[j, d]``. Must be a contiguous 2-d array
385
+ jac (array): Container for Jacobian values ``jac[j]`` (``= dx/dy``)
386
+ corresponding to ``y[j, d]``. Must be a contiguous 1-d array
387
+ nx (int): Number of ``x`` points: ``x[j, d]`` for ``d=0...dim-1``
388
+ and ``j=0...nx-1``. ``nx`` is set to ``x.shape[0]`` if it is
389
+ omitted (or negative).
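+
+ For example, points can be mapped from ``x`` space back into
+ ``y`` space as follows (an illustrative sketch)::
+
+     m = AdaptiveMap([[0, 1], [0, 2]], ninc=10)
+     x = numpy.array([[0.5, 1.5]])
+     y = numpy.empty(x.shape, float)
+     jac = numpy.empty(x.shape[0], float)
+     m.invmap(x, y, jac)       # fills y and jac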
390
+ """
391
+ cdef Py_ssize_t ninc
392
+ cdef Py_ssize_t dim = self.inc.shape[0]
393
+ cdef Py_ssize_t[:] iy
394
+ cdef Py_ssize_t i, iyi, d
395
+ cdef double y_ninc, dy_ninc, tmp_jac
396
+ if nx < 0:
397
+ nx = x.shape[0]
398
+ elif nx > x.shape[0]:
399
+ raise ValueError('nx > x.shape[0]: %d > %d' % (nx, x.shape[0]))
400
+ for i in range(nx):
401
+ jac[i] = 1.
402
+ for d in range(dim):
403
+ ninc = self.ninc[d]
404
+ iy = numpy.searchsorted(self.grid[d, :], x[:, d], side='right')
405
+ for i in range(nx):
406
+ if iy[i] > 0 and iy[i] <= ninc:
407
+ iyi = iy[i] - 1
408
+ y[i, d] = (iyi + (x[i, d] - self.grid[d, iyi]) / self.inc[d, iyi]) / ninc
409
+ jac[i] *= self.inc[d, iyi] * ninc
410
+ elif iy[i] <= 0:
411
+ y[i, d] = 0.
412
+ jac[i] *= self.inc[d, 0] * ninc
413
+ elif iy[i] > ninc:
414
+ y[i, d] = 1.0
415
+ jac[i] *= self.inc[d, ninc - 1] * ninc
416
+ return
417
+
418
+
419
+ # @cython.boundscheck(False)
420
+ # @cython.wraparound(False)
421
+ cpdef add_training_data(
422
+ self,
423
+ double[:, ::1] y,
424
+ double[::1] f,
425
+ Py_ssize_t ny=-1,
426
+ ):
427
+ r""" Add training data ``f`` for ``y``-space points ``y``.
428
+
429
+ Accumulates training data for later use by ``self.adapt()``.
430
+ Grid increments will be made smaller in regions where
431
+ ``f`` is larger than average, and larger where ``f``
432
+ is smaller than average. The grid is unchanged (converged?)
433
+ when ``f`` is constant across the grid.
434
+
435
+ Args:
436
+ y (array): ``y`` values corresponding to the training data.
437
+ ``y`` is a contiguous 2-d array, where ``y[j, d]``
438
+ is for points along direction ``d``.
439
+ f (array): Training function values. ``f[j]`` corresponds to
440
+ point ``y[j, d]`` in ``y``-space.
441
+ ny (int): Number of ``y`` points: ``y[j, d]`` for ``d=0...dim-1``
442
+ and ``j=0...ny-1``. ``ny`` is set to ``y.shape[0]`` if it is
443
+ omitted (or negative).
444
+ """
445
+ cdef Py_ssize_t ninc
446
+ cdef Py_ssize_t dim = self.inc.shape[0]
447
+ cdef Py_ssize_t iy
448
+ cdef Py_ssize_t i, d
449
+ if self.sum_f is None:
450
+ shape = (self.inc.shape[0], self.inc.shape[1])
451
+ self.sum_f = numpy.zeros(shape, float)
452
+ self.n_f = numpy.zeros(shape, float) + TINY
453
+ if ny < 0:
454
+ ny = y.shape[0]
455
+ elif ny > y.shape[0]:
456
+ raise ValueError('ny > y.shape[0]: %d > %d' % (ny, y.shape[0]))
457
+ for d in range(dim):
458
+ ninc = self.ninc[d]
459
+ for i in range(ny):
460
+ if y[i, d] > 0 and y[i, d] < 1:
461
+ iy = <int> floor(y[i, d] * ninc)
462
+ self.sum_f[d, iy] += abs(f[i])
463
+ self.n_f[d, iy] += 1
464
+ return
465
+
466
+ # @cython.boundscheck(False)
467
+ def adapt(self, double alpha=0.0, ninc=None):
468
+ r""" Adapt grid to accumulated training data.
469
+
470
+ ``self.adapt(...)`` projects the training data onto
471
+ each axis independently and maps it into ``x`` space.
472
+ It shrinks ``x``-grid increments in regions where the
473
+ projected training data is large, and grows increments
474
+ where the projected data is small. The grid along
475
+ any direction is unchanged if the training data
476
+ is constant along that direction.
477
+
478
+ The number of increments along a direction can be
479
+ changed by setting parameter ``ninc`` (array or number).
480
+
481
+ The grid does not change if no training data has
482
+ been accumulated, unless ``ninc`` is specified, in
483
+ which case the number of increments is adjusted
484
+ while preserving the relative density of increments
485
+ at different values of ``x``.
486
+
487
+ Args:
488
+ alpha (double): Determines the speed with which the grid
489
+ adapts to training data. Large (positive) values imply
490
+ rapid evolution; small values (much less than one) imply
491
+ slow evolution. Typical values are of order one. Choosing
492
+ ``alpha<0`` causes adaptation to the unmodified training
493
+ data (usually not a good idea).
494
+ ninc (int or array or None): The number of increments in the new
495
+ grid is ``ninc[d]`` (or ``ninc``, if it is a number)
496
+ in direction ``d``. The number is unchanged from the
497
+ old grid if ``ninc`` is omitted (or equals ``None``,
498
+ which is the default).
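+
+ For example (an illustrative sketch), ::
+
+     m.adapt(alpha=1.5)
+
+ refines the grid using accumulated training data, while ::
+
+     m.adapt(ninc=50)
+
+ regrids to 50 increments per direction, preserving the
+ relative density of increments if no new training data
+ has been added.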
499
+ """
500
+ cdef double[:, ::1] new_grid
501
+ cdef double[::1] avg_f, tmp_f
502
+ cdef double sum_f, acc_f, f_ninc
503
+ cdef Py_ssize_t old_ninc
504
+ cdef Py_ssize_t dim = self.grid.shape[0]
505
+ cdef Py_ssize_t i, j
506
+ cdef Py_ssize_t[:] new_ninc
507
+
508
+ # initialization
509
+ if ninc is None:
510
+ new_ninc = numpy.array(self.ninc)
511
+ elif numpy.shape(ninc) == ():
512
+ new_ninc = numpy.full(dim, int(ninc), numpy.intp)
513
+ elif len(ninc) == dim:
514
+ new_ninc = numpy.array(ninc, numpy.intp)
515
+ else:
516
+ raise ValueError('badly formed ninc = ' + str(ninc))
517
+ if min(new_ninc) < 1:
518
+ raise ValueError('ninc < 1: ' + str(list(new_ninc)))
519
+ if max(new_ninc) == 1:
520
+ new_grid = numpy.empty((dim, 2), float)
521
+ for d in range(dim):
522
+ new_grid[d, 0] = self.grid[d, 0]
523
+ new_grid[d, 1] = self.grid[d, self.ninc[d]]
524
+ self.grid = numpy.asarray(new_grid)
525
+ self.inc = numpy.empty((dim, 1), float)
526
+ self.ninc = numpy.array(dim * [1], dtype=numpy.intp)
527
+ for d in range(dim):
528
+ self.inc[d, 0] = self.grid[d, 1] - self.grid[d, 0]
529
+ self.clear()
530
+ return
531
+
532
+ # smooth and regrid
533
+ new_grid = numpy.empty((dim, max(new_ninc) + 1), float)
534
+ avg_f = numpy.ones(self.inc.shape[1], float) # default = uniform
535
+ if alpha > 0 and max(self.ninc) > 1:
536
+ tmp_f = numpy.empty(self.inc.shape[1], float)
537
+ for d in range(dim):
538
+ old_ninc = self.ninc[d]
539
+ if alpha != 0 and old_ninc > 1:
540
+ if self.sum_f is not None:
541
+ for i in range(old_ninc):
542
+ if self.n_f[d, i] > 0:
543
+ avg_f[i] = self.sum_f[d, i] / self.n_f[d, i]
544
+ else:
545
+ avg_f[i] = 0.
546
+ if alpha > 0:
547
+ # smooth
548
+ tmp_f[0] = abs(7. * avg_f[0] + avg_f[1]) / 8.
549
+ tmp_f[old_ninc - 1] = abs(7. * avg_f[old_ninc - 1] + avg_f[old_ninc - 2]) / 8.
550
+ sum_f = tmp_f[0] + tmp_f[old_ninc - 1]
551
+ for i in range(1, old_ninc - 1):
552
+ tmp_f[i] = abs(6. * avg_f[i] + avg_f[i-1] + avg_f[i+1]) / 8.
553
+ sum_f += tmp_f[i]
554
+ if sum_f > 0:
555
+ for i in range(old_ninc):
556
+ avg_f[i] = tmp_f[i] / sum_f + TINY
557
+ else:
558
+ for i in range(old_ninc):
559
+ avg_f[i] = TINY
560
+ for i in range(old_ninc):
561
+ if avg_f[i] > 0 and avg_f[i] <= 0.99999999:
562
+ avg_f[i] = (-(1 - avg_f[i]) / log(avg_f[i])) ** alpha
563
+ # regrid
564
+ new_grid[d, 0] = self.grid[d, 0]
565
+ new_grid[d, new_ninc[d]] = self.grid[d, old_ninc]
566
+ i = 0 # new_x index
567
+ j = -1 # self_x index
568
+ acc_f = 0 # sum(avg_f) accumulated
569
+ f_ninc = 0.
570
+ for i in range(old_ninc):
571
+ f_ninc += avg_f[i]
572
+ f_ninc /= new_ninc[d] # amount of acc_f per new increment
573
+ for i in range(1, new_ninc[d]):
574
+ while acc_f < f_ninc:
575
+ j += 1
576
+ if j < old_ninc:
577
+ acc_f += avg_f[j]
578
+ else:
579
+ break
580
+ else:
581
+ acc_f -= f_ninc
582
+ new_grid[d, i] = (
583
+ self.grid[d, j+1]
584
+ - (acc_f / avg_f[j]) * self.inc[d, j]
585
+ )
586
+ continue
587
+ break
588
+ self.grid = numpy.asarray(new_grid)
589
+ self.inc = numpy.empty((dim, self.grid.shape[1] - 1), float)
590
+ for d in range(dim):
591
+ for i in range(new_ninc[d]):
592
+ self.inc[d, i] = self.grid[d, i + 1] - self.grid[d, i]
593
+ self.ninc = numpy.asarray(new_ninc)
594
+ self.clear()
595
+
596
+ def clear(self):
597
+ " Clear information accumulated by :meth:`AdaptiveMap.add_training_data`. "
598
+ self.sum_f = None
599
+ self.n_f = None
600
+
601
+ def show_grid(self, ngrid=40, axes=None, shrink=False, plotter=None):
602
+ r""" Display plots showing the current grid.
603
+
604
+ Args:
605
+ ngrid (int): The number of grid nodes in each
606
+ direction to include in the plot. The default is 40.
607
+ axes: List of pairs of directions to use in
608
+ different views of the grid. Using ``None`` in
609
+ place of a direction plots the grid for only one
610
+ direction. Omitting ``axes`` causes a default
611
+ set of pairings to be used.
612
+ shrink: Display entire range of each axis
613
+ if ``False``; otherwise shrink range to include
614
+ just the nodes being displayed. The default is
615
+ ``False``.
616
+ plotter: :mod:`matplotlib` plotter to use for plots; if set,
617
+ plots are drawn but not displayed and the plotter is returned.
618
+ If ``None`` (default), plots are displayed using ``matplotlib.pyplot``.
619
+ """
620
+ if plotter is not None:
621
+ plt = plotter
622
+ else:
623
+ try:
624
+ import matplotlib.pyplot as plt
625
+ except ImportError:
626
+ warnings.warn('matplotlib not installed; cannot show_grid')
627
+ return
628
+ dim = self.dim
629
+ if axes is None:
630
+ axes = []
631
+ if dim == 1:
632
+ axes = [(0, None)]
633
+ for d in range(dim):
634
+ axes.append((d, (d + 1) % dim))
635
+ else:
636
+ if len(axes) <= 0:
637
+ return
638
+ for dx,dy in axes:
639
+ if dx is not None and (dx < 0 or dx >= dim):
640
+ raise ValueError('bad directions: %s' % str((dx, dy)))
641
+ if dy is not None and (dy < 0 or dy >= dim):
642
+ raise ValueError('bad directions: %s' % str((dx, dy)))
643
+ fig = plt.figure()
644
+ def plotdata(idx, grid=numpy.asarray(self.grid), ninc=numpy.asarray(self.ninc), axes=axes):
645
+ dx, dy = axes[idx[0]]
646
+ if dx is not None:
647
+ nskip = int(ninc[dx] // ngrid)
648
+ if nskip < 1:
649
+ nskip = 1
650
+ start = nskip // 2
651
+ xrange = [grid[dx, 0], grid[dx, ninc[dx]]]
652
+ xgrid = grid[dx, start::nskip]
653
+ xlabel = 'x[%d]' % dx
654
+ else:
655
+ xrange = [0., 1.]
656
+ xgrid = None
657
+ xlabel = ''
658
+ if dy is not None:
659
+ nskip = int(ninc[dy] // ngrid)
660
+ if nskip < 1:
661
+ nskip = 1
662
+ start = nskip // 2
663
+ yrange = [grid[dy, 0], grid[dy, ninc[dy]]]
664
+ ygrid = grid[dy, start::nskip]
665
+ ylabel = 'x[%d]' % dy
666
+ else:
667
+ yrange = [0., 1.]
668
+ ygrid = None
669
+ ylabel = ''
670
+ if shrink:
671
+ if xgrid is not None:
672
+ xrange = [min(xgrid), max(xgrid)]
673
+ if ygrid is not None:
674
+ yrange = [min(ygrid), max(ygrid)]
675
+ if None not in [dx, dy]:
676
+ fig_caption = 'axes %d, %d' % (dx, dy)
677
+ elif dx is None and dy is not None:
678
+ fig_caption = 'axis %d' % dy
679
+ elif dx is not None and dy is None:
680
+ fig_caption = 'axis %d' % dx
681
+ else:
682
+ return
683
+ fig.clear()
684
+ plt.title(
685
+ "%s (press 'n', 'p', 'q' or a digit)"
686
+ % fig_caption
687
+ )
688
+ plt.xlabel(xlabel)
689
+ plt.ylabel(ylabel)
690
+ if xgrid is not None:
691
+ for i in range(len(xgrid)):
692
+ plt.plot([xgrid[i], xgrid[i]], yrange, 'k-')
693
+ if ygrid is not None:
694
+ for i in range(len(ygrid)):
695
+ plt.plot(xrange, [ygrid[i], ygrid[i]], 'k-')
696
+ plt.xlim(*xrange)
697
+ plt.ylim(*yrange)
698
+
699
+ plt.draw()
700
+
701
+ idx = [0]
702
+ def onpress(event, idx=idx):
703
+ try: # digit?
704
+ idx[0] = int(event.key)
705
+ except ValueError:
706
+ if event.key == 'n':
707
+ idx[0] += 1
708
+ if idx[0] >= len(axes):
709
+ idx[0] = len(axes) - 1
710
+ elif event.key == 'p':
711
+ idx[0] -= 1
712
+ if idx[0] < 0:
713
+ idx[0] = 0
714
+ elif event.key == 'q':
715
+ plt.close()
716
+ return
717
+ else:
718
+ return
719
+ plotdata(idx)
720
+
721
+ fig.canvas.mpl_connect('key_press_event', onpress)
722
+ plotdata(idx)
723
+ if plotter is None:
724
+ plt.show()
725
+ else:
726
+ return plt
727
+
728
+ def adapt_to_samples(self, x, f, nitn=5, alpha=1.0, nproc=1):
729
+ r""" Adapt map to data ``{x, f(x)}``.
730
+
731
+ Replace grid with one that is optimized for integrating
732
+ function ``f(x)``. New grid is found iteratively
733
+
734
+ Args:
735
+ x (array): ``x[:, d]`` are the components of the sample points
736
+ in direction ``d=0,1...self.dim-1``.
737
+ f (callable or array): Function ``f(x)`` to be adapted to. If
738
+ ``f`` is an array, it is assumed to contain values ``f[i]``
739
+ corresponding to the function evaluated at points ``x[i]``.
740
+ nitn (int): Number of iterations to use in adaptation. Default
741
+ is ``nitn=5``.
742
+ alpha (float): Damping parameter for adaptation. Default
743
+ is ``alpha=1.0``. Smaller values slow the iterative
744
+ adaptation, to improve stability of convergence.
745
+ nproc (int or None): Number of processes/processors to use.
746
+ If ``nproc>1`` Python's :mod:`multiprocessing` module is
747
+ used to spread the calculation across multiple processors.
748
+ There is a significant overhead involved in using
749
+ multiple processors so this option is useful mainly
750
+ when very high dimensions or large numbers of samples
751
+ are involved. When using the :mod:`multiprocessing`
752
+ module in its default mode for MacOS and Windows,
753
+ it is important that the main module can be
754
+ safely imported (i.e., without launching new
755
+ processes). This can be accomplished with
756
+ some version of the ``if __name__ == '__main__':``
757
+ construct in the main module: e.g., ::
758
+
759
+ if __name__ == '__main__':
760
+ main()
761
+
762
+ This is not an issue on other Unix platforms.
763
+ See the :mod:`multiprocessing` documentation
764
+ for more information.
765
+ Set ``nproc=None`` to use all the processors
766
+ on the machine (equivalent to ``nproc=os.cpu_count()``).
767
+ Default value is ``nproc=1``. (Requires Python 3.3 or later.)
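+
+ For example (an illustrative sketch; the function ``fcn`` is
+ invented for the example)::
+
+     def fcn(x):
+         return numpy.exp(-100. * numpy.sum((x - 0.5) ** 2, axis=1))
+
+     m = AdaptiveMap([[0, 1], [0, 1]])
+     x = gvar.RNG.random((10000, 2))     # samples of the domain
+     m.adapt_to_samples(x, fcn, nitn=5)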
768
+ """
769
+ cdef Py_ssize_t i, tmp_ninc, old_ninc
770
+ x = numpy.ascontiguousarray(x)
771
+ if len(x.shape) != 2 or x.shape[1] != self.dim:
772
+ raise ValueError('incompatible shape of x: {}'.format(x.shape))
773
+ if nproc is None:
774
+ nproc = os.cpu_count()
775
+ if nproc is None:
776
+ raise ValueError("need to specify nproc (nproc=None does't work on this machine)")
777
+ nproc = int(nproc)
778
+ if callable(f):
779
+ fx = numpy.ascontiguousarray(f(x))
780
+ else:
781
+ fx = numpy.ascontiguousarray(f)
782
+ if fx.shape[0] != x.shape[0]:
783
+ raise ValueError('shape of x and f(x) mismatch: {} vs {}'.format(x.shape, fx.shape))
784
+ old_ninc = max(max(self.ninc), Integrator.defaults['maxinc_axis'])
785
+ tmp_ninc = type(old_ninc)(min(old_ninc, x.shape[0] / 10.))
786
+ if tmp_ninc < 2:
787
+ raise ValueError('not enough samples: {}'.format(x.shape[0]))
788
+ y = numpy.empty(x.shape, float)
789
+ jac = numpy.empty(x.shape[0], float)
790
+ self.adapt(ninc=tmp_ninc)
791
+ if nproc > 1:
792
+ pool = multiprocessing.Pool(processes=nproc)
793
+ for i in range(nitn):
794
+ self._add_training_data(x, f, fx, nproc, pool)
795
+ self.adapt(alpha=alpha, ninc=tmp_ninc)
796
+ pool.close()
797
+ pool.join()
798
+ else:
799
+ for i in range(nitn):
800
+ self.invmap(x, y, jac)
801
+ self.add_training_data(y, (jac * fx) ** 2)
802
+ self.adapt(alpha=alpha, ninc=tmp_ninc)
803
+ if numpy.any(tmp_ninc != old_ninc):
804
+ self.adapt(ninc=old_ninc)
805
+
806
+ def _add_training_data(self, x, f, fx, nproc, pool):
807
+ " Used by self.adapt_to_samples in multiprocessing mode. "
808
+ nx = x.shape[0]
809
+ end = 0
810
+ args = []
811
+ for i in range(nproc):
812
+ nx = (x.shape[0] - end) // (nproc - i)
813
+ start = end
814
+ end = start + nx
815
+ args += [(
816
+ self,
817
+ x[start:end, :],
818
+ fx[start:end]
819
+ )]
820
+ res = pool.starmap(self._collect_training_data, args, 1)
821
+ self.sum_f = numpy.sum([resi[0] for resi in res], axis=0)
822
+ self.n_f = numpy.sum([resi[1] for resi in res], axis=0) + TINY
823
+
824
+ @staticmethod
825
+ def _collect_training_data(map, x, fx):
826
+ " Used by self.adapt_to_samples in multiprocessing mode. "
827
+ map.clear()
828
+ y = numpy.empty(x.shape, float)
829
+ jac = numpy.empty(x.shape[0], float)
830
+ map.invmap(x, y, jac)
831
+ map.add_training_data(y, (fx * jac)**2)
832
+ return (numpy.asarray(map.sum_f), numpy.asarray(map.n_f))
833
+
834
+ cdef class Integrator(object):
835
+ r""" Adaptive multidimensional Monte Carlo integration.
836
+
837
+ :class:`vegas.Integrator` objects make Monte Carlo
838
+ estimates of multidimensional functions ``f(x)``
839
+ where ``x[d]`` is a point in the integration volume::
840
+
841
+ integ = vegas.Integrator(integration_region)
842
+
843
+ result = integ(f, nitn=10, neval=10000)
844
+
845
+ The integrator makes ``nitn`` estimates of the integral, each
846
+ using at most ``neval`` samples of the integrand, as it adapts to
847
+ the specific features of the integrand. Successive estimates (iterations)
848
+ typically improve in accuracy until the integrator has fully
849
+ adapted. The integrator returns the weighted average of all
850
+ ``nitn`` estimates, together with an estimate of the statistical
851
+ (Monte Carlo) uncertainty in that estimate of the integral. The
852
+ result is an object of type :class:`RAvg` (which is derived
853
+ from :class:`gvar.GVar`).
854
+
855
+ Integrands ``f(x)`` return numbers, arrays of numbers (any shape), or
856
+ dictionaries whose values are numbers or arrays (any shape). Each number
857
+ returned by an integrand corresponds to a different integrand. When
858
+ arrays are returned, |vegas| adapts to the first number
859
+ in the flattened array. When dictionaries are returned,
860
+ |vegas| adapts to the first number in the value corresponding to
861
+ the first key.
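+
+ For example, an integrand returning a dictionary of two values
+ (an illustrative sketch; both integrands are invented)::
+
+     def f(x):
+         return dict(gauss=numpy.exp(-100. * (x[0] - 0.5) ** 2), poly=x[0] ** 2)
+
+     integ = vegas.Integrator([[0, 1]])
+     result = integ(f, nitn=10, neval=1000)
+     print(result['gauss'], result['poly'])   # one estimate per key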
862
+
863
+ |vegas| can generate integration points in batches for integrands
864
+ built from classes derived from :class:`vegas.LBatchIntegrand`, or
865
+ integrand functions decorated by :func:`vegas.lbatchintegrand`. Batch
866
+ integrands are typically much faster, especially if they are coded in
867
+ Cython or C/C++ or Fortran.
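+
+ For example (an illustrative sketch; the integrand is invented)::
+
+     @vegas.lbatchintegrand
+     def f_batch(x):
+         # here x[i, d] holds a batch of integration points
+         return numpy.exp(-100. * numpy.sum((x - 0.5) ** 2, axis=1))
+
+     integ = vegas.Integrator(2 * [[0, 1]])
+     result = integ(f_batch, nitn=10, neval=10000)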
868
+
869
+ |Integrator|\s have a large number of parameters but the
870
+ only ones that most people will care about are: the
871
+ number ``nitn`` of iterations of the |vegas| algorithm;
872
+ the maximum number ``neval`` of integrand evaluations per
873
+ iteration; and the damping parameter ``alpha``, which is used
874
+ to slow down the adaptive algorithms when they would otherwise
875
+ be unstable (e.g., with very peaky integrands). Setting parameter
876
+ ``analyzer=vegas.reporter()`` is sometimes useful, as well,
877
+ since it causes |vegas| to print (on ``sys.stdout``)
878
+ intermediate results from each iteration, as they are
879
+ produced. This helps when each iteration takes a long time
880
+ to complete (e.g., longer than an hour) because it allows you to
881
+ monitor progress as it is being made (or not).
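+
+ For example (an illustrative sketch, with ``f`` as above)::
+
+     integ = vegas.Integrator([[0, 1]], analyzer=vegas.reporter())
+     result = integ(f, nitn=10, neval=1000)   # prints each iteration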
882
+
883
+ Args:
884
+ map (array, dictionary, :class:`vegas.AdaptiveMap` or :class:`vegas.Integrator`):
885
+ The integration region is specified by an array ``map[d, i]``
886
+ where ``d`` is the direction and ``i=0,1`` specify the lower
887
+ and upper limits of integration in direction ``d``. Integration
888
+ points ``x`` are packaged as arrays ``x[d]`` when
889
+ passed to the integrand ``f(x)``.
890
+
891
+ More generally, the integrator packages integration points in
892
+ multidimensional arrays ``x[d1, d2..dn]`` when the integration
893
+ limits are specified by ``map[d1, d2...dn, i]`` with ``i=0,1``.
894
+ These arrays can have any shape.
895
+
896
+ Alternatively, the integration region can be specified by a
897
+ dictionary whose values ``map[key]`` are either 2-tuples or
898
+ arrays of 2-tuples corresponding to the lower and upper
899
+ integration limits for the corresponding variables. Then
900
+ integration points ``xd`` are packaged as dictionaries
901
+ having the same structure as ``map`` but with the integration
902
+ limits replaced by the values of the variables:
903
+ for example, ::
904
+
905
+ map = dict(r=(0, 1), phi=[(0, np.pi), (0, 2 * np.pi)])
906
+
907
+ indicates a three-dimensional integral over variables ``r``
908
+ (from ``0`` to ``1``), ``phi[0]`` (from ``0`` to ``np.pi``),
909
+ and ``phi[1]`` (from ``0`` to ``2*np.pi``). In this case
910
+ integrands ``f(xd)`` have dictionary arguments ``xd`` where
911
+ ``xd['r']``, ``xd['phi'][0]``, and ``xd['phi'][1]``
912
+ correspond to the integration variables.
913
+
914
+ Finally ``map`` could be the integration map from
915
+ another |Integrator|, or that |Integrator|
916
+ itself. In this case the grid is copied from the
917
+ existing integrator.
918
+ uses_jac (bool): Setting ``uses_jac=True`` causes |vegas| to
919
+ call the integrand with two arguments: ``fcn(x, jac=jac)``.
920
+ The second argument is the Jacobian ``jac[d] = dx[d]/dy[d]``
921
+ of the |vegas| map. The integral over ``x[d]`` of ``1/jac[d]``
922
+ equals 1 (exactly). The default setting
923
+ is ``uses_jac=False``.
924
+ nitn (positive int): The maximum number of iterations used to
925
+ adapt to the integrand and estimate its value. The
926
+ default value is 10; typical values range from 10
927
+ to 20.
928
+ neval (positive int): Approximate number of integrand evaluations
929
+ in each iteration of the |vegas| algorithm. Increasing
930
+ ``neval`` increases the precision: statistical errors should
931
+ fall at least as fast as ``sqrt(1./neval)`` and often
932
+ fall much faster. The default value is 1000;
933
+ real problems often require 10--10,000 times more evaluations
934
+ than this.
935
+ nstrat (int array): ``nstrat[d]`` specifies the number of
936
+ stratifications to use in direction ``d``. By default this
937
+ parameter is set automatically, based on parameter ``neval``,
938
+ with ``nstrat[d]`` approximately the same for every ``d``.
939
+ Specifying ``nstrat`` explicitly makes it possible to
940
+ concentrate stratifications in directions where they are most
941
+ needed. If ``nstrat`` is set but ``neval`` is not,
942
+ ``neval`` is set equal to ``2*prod(nstrat)/(1-neval_frac)``.
943
+ alpha (float): Damping parameter controlling the remapping
944
+ of the integration variables as |vegas| adapts to the
945
+ integrand. Smaller values slow adaptation, which may be
946
+ desirable for difficult integrands. Small or zero ``alpha``\s
947
+ are also sometimes useful after the grid has adapted,
948
+ to minimize fluctuations away from the optimal grid.
949
+ The default value is 0.5.
950
+ beta (float): Damping parameter controlling the redistribution
951
+ of integrand evaluations across hypercubes in the
952
+ stratified sampling of the integral (over transformed
953
+ variables). Smaller values limit the amount of
954
+ redistribution. The theoretically optimal value is 1;
955
+ setting ``beta=0`` prevents any redistribution of
956
+ evaluations. The default value is 0.75.
957
+ neval_frac (float): Approximate fraction of function evaluations
958
+ used for adaptive stratified sampling. |vegas|
959
+ distributes ``(1-neval_frac)*neval`` integrand evaluations
960
+ uniformly over all hypercubes, with at least 2 evaluations
961
+ per hypercube. The remaining ``neval_frac*neval``
962
+ evaluations are concentrated in hypercubes where the errors
963
+ are largest. Increasing ``neval_frac`` makes more integrand
964
+ evaluations available for adaptive stratified
965
+ sampling, but reduces the number of hypercubes, which limits
966
+ the algorithm's ability to adapt. Ignored when ``beta=0``.
967
+ Default is ``neval_frac=0.75``.
968
+ adapt (bool): Setting ``adapt=False`` prevents further
969
+ adaptation by |vegas|. Typically this would be done
970
+ after training the |Integrator| on an integrand, in order
971
+ to stabilize further estimates of the integral. |vegas| uses
972
+ unweighted averages to combine results from different
973
+ iterations when ``adapt=False``. The default setting
974
+ is ``adapt=True``.
975
+ nproc (int or None): Number of processes/processors used
976
+ to evaluate the integrand. If ``nproc>1`` Python's
977
+ :mod:`multiprocessing` module is used to spread
978
+ integration points across multiple processors, thereby
979
+ potentially reducing the time required to evaluate the
980
+ integral. There is a significant overhead involved in using
981
+ multiple processors so this option is useful only when
982
+ the integrand is expensive to evaluate. When using the
983
+ :mod:`multiprocessing` module in its default mode for
984
+ MacOS and Windows, it is important that the main module
985
+ can be safely imported (i.e., without launching new
986
+ processes). This can be accomplished with
987
+ some version of the ``if __name__ == '__main__':``
988
+ construct in the main module: e.g., ::
989
+
990
+ if __name__ == '__main__':
991
+ main()
992
+
993
+ This is not an issue on other Unix platforms.
994
+ See the :mod:`multiprocessing` documentation
995
+ for more information. Note that setting ``nproc``
996
+ greater than 1 disables MPI support.
997
+ Set ``nproc=None`` to use all the processors
998
+ on the machine (equivalent to ``nproc=os.cpu_count()``).
999
+ Default value is ``nproc=1``. (Requires Python 3.3 or later.)
1000
+
1001
+ Note that ``nproc`` has nothing to do with MPI support.
1002
+ The number of MPI processors is specified outside Python
1003
+ (via, for example, ``mpirun -np 8 python script.py`` on
1004
+ the command line).
1005
+ analyzer: An object with methods
1006
+
1007
+ ``analyzer.begin(itn, integrator)``
1008
+
1009
+ ``analyzer.end(itn_result, result)``
1010
+
1011
+ where: ``begin(itn, integrator)`` is called at the start
1012
+ of each |vegas| iteration with ``itn`` equal to the
1013
+ iteration number and ``integrator`` equal to the
1014
+ integrator itself; and ``end(itn_result, result)``
1015
+ is called at the end of each iteration with
1016
+ ``itn_result`` equal to the result for that
1017
+ iteration and ``result`` equal to the cumulative
1018
+ result of all iterations so far.
1019
+ Setting ``analyzer=vegas.reporter()``, for
1020
+ example, causes vegas to print out a running report
1021
+ of its results as they are produced. The default
1022
+ is ``analyzer=None``.
1023
+ min_neval_batch (positive int): The minimum number of integration
1024
+ points to be passed together to the integrand when using
1025
+ |vegas| in batch mode. The default value is 50,000. Larger
1026
+ values may lead to faster evaluations, but at the cost of
1027
+ more memory for internal work arrays. The last batch is
1028
+ usually smaller than this limit, as it is limited by ``neval``.
1029
+ max_neval_hcube (positive int): Maximum number of integrand
1030
+ evaluations per hypercube in the stratification. The default
1031
+ value is 50,000. Larger values might allow for more adaptation
1032
+ (when ``beta>0``), but also allow for more over-shoot when
1033
+ adapting to sharp peaks. Larger values also can result in
1034
+ large internal work arrays.
1035
+ minimize_mem (bool): When ``True``, |vegas| minimizes
1036
+ internal workspace by moving some of its data to
1037
+ a disk file. This increases execution time (slightly)
1038
+ and results in temporary files, but might be desirable
1039
+ when the number of evaluations is very large (e.g.,
1040
+ ``neval=1e9``). ``minimize_mem=True``
1041
+ requires the ``h5py`` Python module.
1042
+ max_mem (positive float): Maximum number of floats allowed in
1043
+ internal work arrays (approx.). A ``MemoryError`` is
1044
+ raised if the work arrays are too large, in which case
1045
+ one might want to reduce ``min_neval_batch`` or
1046
+ ``max_neval_hcube``, or set ``minimize_mem=True``
1047
+ (or increase ``max_mem`` if there is enough RAM).
1048
+ Default value is 1e9.
1049
+ maxinc_axis (positive int): The maximum number of increments
1050
+ per axis allowed for the |x|-space grid. The default
1051
+ value is 1000; there is probably little need to use
1052
+ other values.
1053
+ rtol (float): Relative error in the integral estimate
1054
+ at which point the integrator can stop. The default
1055
+ value is 0.0 which turns off this stopping condition.
1056
+ This stopping condition can be quite unreliable
1057
+ in early iterations, before |vegas| has converged.
1058
+ Use with caution, if at all.
1059
+ atol (float): Absolute error in the integral estimate
1060
+ at which point the integrator can stop. The default
1061
+ value is 0.0 which turns off this stopping condition.
1062
+ This stopping condition can be quite unreliable
1063
+ in early iterations, before |vegas| has converged.
1064
+ Use with caution, if at all.
1065
+ ran_array_generator: Replacement function for the default
1066
+ random number generator. ``ran_array_generator(size)``
1067
+ should create random numbers uniformly distributed
1068
+ between 0 and 1 in an array whose dimensions are specified by the
1069
+ integer-valued tuple ``size``. Setting ``ran_array_generator``
1070
+ to ``None`` restores the default generator (from :mod:`gvar`).
1071
+ sync_ran (bool): If ``True`` (default), the *default* random
1072
+ number generator is synchronized across all processors when
1073
+ using MPI. If ``False``, |vegas| does no synchronization
1074
+ (but the random numbers should be synchronized some other
1075
+ way). Ignored if not using MPI.
1076
+ adapt_to_errors (bool):
1077
+ ``adapt_to_errors=False`` causes |vegas| to remap the
1078
+ integration variables to emphasize regions where ``|f(x)|``
1079
+ is largest. This is the default mode.
1080
+
1081
+ ``adapt_to_errors=True`` causes |vegas| to remap
1082
+ variables to emphasize regions where the Monte Carlo
1083
+ error is largest. This might be superior when
1084
+ the number of stratifications (``self.nstrat``)
1085
+ in the |y| grid is large (> 100). It is typically
1086
+ useful only in one or two dimensions.
1087
+ uniform_nstrat (bool): If ``True``, requires that the
1088
+ ``nstrat[d]`` be equal for all ``d``. If ``False`` (default),
1089
+ the algorithm maximizes the number of stratifications while
1090
+ requiring ``|nstrat[d1] - nstrat[d2]| <= 1``. This parameter
1091
+ is ignored if ``nstrat`` is specified explicitly.
1092
+ mpi (bool): Setting ``mpi=False`` (default) disables ``mpi`` support in
1093
+ ``vegas`` even if ``mpi`` is available; setting ``mpi=True``
1094
+ allows use of ``mpi`` provided the :mod:`mpi4py` module
1095
+ is installed.
1096
+ """
1097
+
1098
+ # Settings accessible via the constructor and Integrator.set
1099
+ defaults = dict(
1100
+ map=None, # integration region, AdaptiveMap, or Integrator
1101
+ neval=1000, # number of evaluations per iteration
1102
+ maxinc_axis=1000, # number of adaptive-map increments per axis
1103
+ min_neval_batch=50000, # min. number of evaluations per batch
1104
+ max_neval_hcube=50000, # max number of evaluations per h-cube
1105
+ neval_frac=0.75, # fraction of evaluations used for adaptive stratified sampling
1106
+ max_mem=1e9, # memory cutoff (# of floats)
1107
+ nitn=10, # number of iterations
1108
+ alpha=0.5, # damping parameter for importance sampling
1109
+ beta=0.75, # damping parameter for stratified sampling
1110
+ adapt=True, # flag to turn adaptation on or off
1111
+ minimize_mem=False, # minimize work memory (when neval very large)?
1112
+ adapt_to_errors=False, # alternative approach to stratified sampling (low dim)?
1113
+ uniform_nstrat=False, # require same nstrat[d] for all directions d?
1114
+ rtol=0, # relative error tolerance
1115
+ atol=0, # absolute error tolerance
1116
+ analyzer=None, # analyzes results from each iteration
1117
+ ran_array_generator=None, # alternative random number generator
1118
+ sync_ran=True, # synchronize random generators across MPI processes?
1119
+ mpi=False, # allow MPI?
1120
+ uses_jac=False, # return Jacobian to integrand?
1121
+ nproc=1, # number of processors to use
1122
+ )
1123
+
1124
+ def __init__(Integrator self not None, map, **kargs):
1125
+ # N.B. All attributes initialized automatically by cython.
1126
+ # This is why self.set() works here.
1127
+ self.neval_hcube_range = None
1128
+ self.last_neval = 0
1129
+ self.pool = None
1130
+ self.sigf_h5 = None
1131
+ if isinstance(map, Integrator):
1132
+ self._set_map(map.map)
1133
+ args = {}
1134
+ for k in Integrator.defaults:
1135
+ if k != 'map':
1136
+ args[k] = getattr(map, k)
1137
+ # following not in Integrator.defaults
1138
+ self.sigf = numpy.array(map.sigf)
1139
+ self.sum_sigf = numpy.sum(self.sigf)
1140
+ self.nstrat = numpy.array(map.nstrat)
1141
+ else:
1142
+ self.sigf = numpy.array([], float) # reset sigf (dummy)
1143
+ self.sum_sigf = HUGE
1144
+ args = dict(Integrator.defaults)
1145
+ if 'map' in args:
1146
+ del args['map']
1147
+ self._set_map(map)
1148
+ self.nstrat = numpy.full(self.map.dim, 0, dtype=numpy.intp) # dummy (flags action in self.set())
1149
+ args.update(kargs)
1150
+ if 'nstrat' in kargs and 'neval' not in kargs and 'neval' in args:
1151
+ del args['neval']
1152
+ if 'neval' in kargs and 'nstrat' not in kargs and 'nstrat' in args:
1153
+ del args['nstrat']
1154
+ self.set(args)
1155
+
1156
+ def __del__(self):
1157
+ self._clear_sigf_h5()
1158
+ if self.pool is not None:
1159
+ self.pool.close()
1160
+ self.pool.join()
1161
+ self.pool = None
1162
+
1163
+ def _clear_sigf_h5(self):
1164
+ if self.sigf_h5 is not None:
1165
+ fname = self.sigf_h5.filename
1166
+ self.sigf_h5.close()
1167
+ os.unlink(fname)
1168
+ self.sigf_h5 = None
1169
+ self.sigf = numpy.array([], float) # reset sigf (dummy)
1170
+ self.sum_sigf = HUGE
1171
+
1172
+ def __reduce__(Integrator self not None):
1173
+ r""" Capture state for pickling. """
1174
+ odict = dict()
1175
+ for k in Integrator.defaults:
1176
+ if k in ['map']:
1177
+ continue
1178
+ odict[k] = getattr(self, k)
1179
+ odict['nstrat'] = numpy.asarray(self.nstrat)
1180
+ odict['sigf'] = numpy.asarray(self.sigf)
1181
+ return (Integrator, (self.map,), odict)
1182
+
1183
+ def __setstate__(Integrator self not None, odict):
1184
+ r""" Set state for unpickling. """
1185
+ self.set(odict)
1186
+
1187
+ def _set_map(self, map):
1188
+ r""" install new map, create xsample """
1189
+ if isinstance(map, AdaptiveMap):
1190
+ self.map = AdaptiveMap(map)
1191
+ self.xsample = numpy.empty(self.map.dim, dtype=float)
1192
+ for d in range(self.map.dim):
1193
+ self.xsample[d] = gvar.RNG.uniform(*self.map.region(d))
1194
+ elif isinstance(map, Integrator):
1195
+ self.map = AdaptiveMap(map.map)
1196
+ self.xsample = (
1197
+ gvar.BufferDict(map.xsample)
1198
+ if map.xsample.shape is None else
1199
+ numpy.array(map.xsample)
1200
+ )
1201
+ else:
1202
+ if hasattr(map, 'keys'):
1203
+ map = gvar.asbufferdict(map)
1204
+ self.xsample = gvar.BufferDict()
1205
+ limits = []
1206
+ for k in map:
1207
+ shape = map[k].shape[:-1]
1208
+ if shape == ():
1209
+ self.xsample[k] = gvar.RNG.uniform(*map[k])
1210
+ limits.append(map[k])
1211
+ else:
1212
+ self.xsample[k] = numpy.empty(shape, dtype=float)
1213
+ for idx in numpy.ndindex(shape):
1214
+ self.xsample[k][idx] = gvar.RNG.uniform(*map[k][idx])
1215
+ limits += numpy.array(map[k]).reshape(-1,2).tolist()
1216
+ self.map = AdaptiveMap(limits)
1217
+ else:
1218
+ # need to allow for possibility that map is a grid with differing numbers of
1219
+ # nodes in different directions; do this with the dtype=object in following
1220
+ map = numpy.array(map, dtype=object)
1221
+ if numpy.shape(map.flat[0]) == ():
1222
+ # homogeneous array
1223
+ self.xsample = numpy.empty(map.shape[:-1], dtype=float)
1224
+ grid = map.reshape(-1, 2)
1225
+ else:
1226
+ # heterogeneous array
1227
+ self.xsample = numpy.empty(map.shape, dtype=float)
1228
+ grid = map.reshape(-1)
1229
+ self.map = AdaptiveMap(grid)
1230
+ for i, idx in enumerate(numpy.ndindex(self.xsample.shape)):
1231
+ self.xsample[idx] = gvar.RNG.uniform(*self.map.region(i))
1232
+
1233
+
1234
+ def set(Integrator self not None, ka={}, **kargs):
1235
+ r""" Reset default parameters in integrator.
1236
+
1237
+ Usage is analogous to the constructor
1238
+ for |Integrator|: for example, ::
1239
+
1240
+ old_defaults = integ.set(neval=1e6, nitn=20)
1241
+
1242
+ resets the default values for ``neval`` and ``nitn``
1243
+ in |Integrator| ``integ``. A dictionary, here
1244
+ ``old_defaults``, is returned. It can be used
1245
+ to restore the old defaults using, for example::
1246
+
1247
+ integ.set(old_defaults)
1248
+ """
1249
+ # 1) reset parameters
1250
+ if kargs:
1251
+ kargs.update(ka)
1252
+ else:
1253
+ kargs = ka
1254
+ old_val = dict() # records anything that is changed
1255
+ nstrat = None
1256
+ for k in kargs:
1257
+ if k == 'map':
1258
+ old_val['map'] = self.map
1259
+ self._set_map(kargs['map'])
1260
+ elif k == 'nstrat':
1261
+ if kargs['nstrat'] is None:
1262
+ continue
1263
+ old_val['nstrat'] = self.nstrat
1264
+ nstrat = numpy.array(kargs['nstrat'], dtype=numpy.intp)
1265
+ elif k == 'sigf':
1266
+ old_val['sigf'] = self.sigf
1267
+ self.sigf = numpy.fabs(kargs['sigf'])
1268
+ self.sum_sigf = numpy.sum(self.sigf)
1269
+ elif k == 'nproc':
1270
+ old_val['nproc'] = self.nproc
1271
+ self.nproc = kargs['nproc'] if kargs['nproc'] is not None else os.cpu_count()
1272
+ if self.nproc is None:
1273
+ self.nproc = 1
1274
+ if self.nproc != old_val['nproc']:
1275
+ if self.pool is not None:
1276
+ self.pool.close()
1277
+ self.pool.join()
1278
+ if self.nproc != 1:
1279
+ try:
1280
+ self.pool = multiprocessing.Pool(processes=self.nproc)
1281
+ except:
1282
+ self.nproc = 1
1283
+ self.pool = None
1284
+ else:
1285
+ self.pool = None
1286
+ elif k in Integrator.defaults:
1287
+ # ignore entry if set to None (useful for debugging)
1288
+ # if kargs[k] is None:
1289
+ # continue
1290
+ old_val[k] = getattr(self, k)
1291
+ try:
1292
+ setattr(self, k, kargs[k])
1293
+ except:
1294
+ setattr(self, k, type(old_val[k])(kargs[k]))
1295
+ elif k not in ['nhcube_batch', 'max_nhcube']:
1296
+ # ignore legacy parameters, but raise error for others
1297
+ raise AttributeError('no parameter named "%s"' % str(k))
1298
+
1299
+ # 2) sanity checks
1300
+ if nstrat is not None:
1301
+ if len(nstrat) != self.map.dim:
1302
+ raise ValueError('nstrat[d] has wrong length: %d not %d' % (len(nstrat), self.map.dim))
1303
+ if numpy.any(nstrat < 1):
1304
+ raise ValueError('bad nstrat: ' + str(numpy.asarray(nstrat)))
1305
+ if self.neval_frac < 0 or self.neval_frac >= 1:
1306
+ raise ValueError('neval_frac = {} but require 0 <= neval_frac < 1'.format(self.neval_frac))
1307
+ if 'neval' in old_val and self.neval < 2:
1308
+ raise ValueError('neval>=2 required, not ' + str(self.neval))
1309
+ neval_frac = 0 if (self.beta == 0 or self.adapt_to_errors) else self.neval_frac
1310
+
1311
+ self.dim = self.map.dim
1312
+
1313
+ # 3) determine # strata in each direction
1314
+ if nstrat is not None:
1315
+ # nstrat specified explicitly
1316
+ if len(nstrat) != self.dim or min(nstrat) < 1:
1317
+ raise ValueError('bad nstrat = %s' % str(numpy.asarray(nstrat)))
1318
+ nhcube = numpy.prod(nstrat)
1319
+ if 'neval' not in old_val:
1320
+ old_val['neval'] = self.neval
1321
+ self.neval = type(self.neval)(2. * nhcube / (1. - neval_frac))
1322
+ elif self.neval < 2. * nhcube / (1. - neval_frac):
1323
+ raise ValueError('neval too small: {} < {}'.format(self.neval, 2. * nhcube / (1. - neval_frac)))
1324
+ elif 'neval' in old_val or 'neval_frac' in old_val: ##### or 'max_nhcube' in old_val:
1325
+ # determine stratification from neval,neval_frac if either was specified
1326
+ ns = int(abs((1 - neval_frac) * self.neval / 2.) ** (1. / self.dim)) # stratifications / axis
1327
+ if ns < 1:
1328
+ ns = 1
1329
+ d = int(
1330
+ (numpy.log((1 - neval_frac) * self.neval / 2.) - self.dim * numpy.log(ns))
1331
+ / numpy.log(1 + 1. / ns)
1332
+ )
1333
+ if ((ns + 1)**d * ns**(self.dim-d)) > self.max_mem and not self.minimize_mem:
1334
+ raise MemoryError("work arrays larger than max_mem; set minimize_mem=True (and install h5py module) or increase max_mem")
1335
+ # ns = int(abs(self.max_nhcube) ** abs(1. / self.dim))
1336
+ # d = int(
1337
+ # (numpy.log(self.max_nhcube) - self.dim * numpy.log(ns))
1338
+ # / numpy.log(1 + 1. / ns)
1339
+ # )
1340
+ if self.uniform_nstrat:
1341
+ d = 0
1342
+ nstrat = numpy.empty(self.dim, numpy.intp)
1343
+ nstrat[:d] = ns + 1
1344
+ nstrat[d:] = ns
1345
+ else:
1346
+ # go with existing grid if none of nstrat, neval and neval_frac changed
1347
+ nstrat = self.nstrat
1348
+
1349
+ # 4) reconfigure vegas map, if necessary
1350
+ if self.adapt_to_errors:
1351
+ self.map.adapt(ninc=numpy.asarray(nstrat))
1352
+ else:
1353
+ ni = min(int(self.neval / 10.), self.maxinc_axis) # increments/axis
1354
+ ninc = numpy.empty(self.dim, numpy.intp)
1355
+ for d in range(self.dim):
1356
+ if ni >= nstrat[d]:
1357
+ ninc[d] = int(ni / nstrat[d]) * nstrat[d]
1358
+ elif nstrat[d] <= self.maxinc_axis:
1359
+ ninc[d] = nstrat[d]
1360
+ else:
1361
+ nstrat[d] = int(nstrat[d] / ni) * ni
1362
+ ninc[d] = ni
1363
+ if not numpy.all(numpy.equal(self.map.ninc, ninc)):
1364
+ self.map.adapt(ninc=ninc)
1365
+
1366
+ if not numpy.all(numpy.equal(self.nstrat, nstrat)):
1367
+ if 'sigf' not in old_val:
1368
+ # need to recalculate stratification distribution for beta>0
1369
+ # unless a new sigf was set
1370
+ old_val['sigf'] = self.sigf
1371
+ self.sigf = numpy.array([], float) # reset sigf (dummy)
1372
+ self.sum_sigf = HUGE
1373
+ self.nstrat = nstrat
1374
+
1375
+ # 5) set min_neval_hcube
1376
+ # chosen so that actual neval is close to but not larger than self.neval
1377
+ # (unless self.minimize_mem is True in which case it could be larger)
1378
+ self.nhcube = numpy.prod(self.nstrat, dtype=type(self.nhcube))
1379
+ avg_neval_hcube = int(self.neval / self.nhcube)
1380
+ if self.nhcube == 1:
1381
+ self.min_neval_hcube = int(self.neval)
1382
+ else:
1383
+ self.min_neval_hcube = int((1 - neval_frac) * self.neval / self.nhcube)
1384
+ if self.min_neval_hcube < 2:
1385
+ self.min_neval_hcube = 2
1386
+
1387
+ # 6) allocate work arrays -- these are stored in the
1388
+ # Integrator so that the storage is held between
1389
+ # iterations, thereby minimizing the amount of allocating
1390
+ # that goes on
1391
+
1392
+ # neval_batch = self.nhcube_batch * avg_neval_hcube
1393
+ nsigf = self.nhcube
1394
+ if self.beta >= 0 and self.nhcube > 1 and not self.adapt_to_errors and len(self.sigf) != nsigf:
1395
+ # set up sigf
1396
+ self._clear_sigf_h5()
1397
+ if not self.minimize_mem:
1398
+ self.sigf = numpy.ones(nsigf, float)
1399
+ else:
1400
+ try:
1401
+ import h5py
1402
+ except ImportError:
1403
+ raise ValueError("Install the h5py Python module in order to use minimize_mem=True")
1404
+ self.sigf_h5 = h5py.File(tempfile.mkstemp(dir='.', prefix='vegastmp_')[1], 'a')
1405
+ self.sigf_h5.create_dataset('sigf', shape=(nsigf,), dtype=float, chunks=True, fillvalue=1.)
1406
+ self.sigf = self.sigf_h5['sigf']
1407
+ self.sum_sigf = nsigf
1408
+ self.neval_hcube = numpy.empty(self.min_neval_batch // 2 + 1, dtype=numpy.intp)
1409
+ self.neval_hcube[:] = avg_neval_hcube
1410
+ self.y = numpy.empty((self.min_neval_batch, self.dim), float)
1411
+ self.x = numpy.empty((self.min_neval_batch, self.dim), float)
1412
+ self.jac = numpy.empty(self.min_neval_batch, float)
1413
+ self.fdv2 = numpy.empty(self.min_neval_batch, float)
1414
+ return old_val
1415
+
1416
+ def settings(Integrator self not None, ngrid=0):
1417
+ r""" Assemble summary of integrator settings into string.
1418
+
1419
+ Args:
1420
+ ngrid (int): Number of grid nodes in each direction
1421
+ to include in summary.
1422
+ The default is 0.
1423
+ Returns:
1424
+ String containing the settings.
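+ 
+ A minimal usage sketch (illustrative; assumes ``integ`` is an
+ existing :class:`vegas.Integrator`)::
+ 
+ print(integ.settings(ngrid=5)) # summary with 5 grid nodes/axis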
1425
+ """
1426
+ cdef Py_ssize_t d
1427
+ nhcube = numpy.prod(self.nstrat)
1428
+ neval = nhcube * self.min_neval_hcube if self.beta <= 0 else self.neval
1429
+ ans = "Integrator Settings:\n"
1430
+ if self.beta > 0 and not self.adapt_to_errors:
1431
+ ans = ans + (
1432
+ " %.6g (approx) integrand evaluations in each of %d iterations\n"
1433
+ % (self.neval, self.nitn)
1434
+ )
1435
+ else:
1436
+ ans = ans + (
1437
+ " %.6g integrand evaluations in each of %d iterations\n"
1438
+ % (neval, self.nitn)
1439
+ )
1440
+ ans = ans + (
1441
+ " number of: strata/axis = %s\n" % str(numpy.array(self.nstrat))
1442
+ )
1443
+ ans = ans + (
1444
+ " increments/axis = %s\n"
1445
+ % str(numpy.asarray(self.map.ninc))
1446
+ )
1447
+ ans = ans + (
1448
+ " h-cubes = %.6g processors = %d\n"
1449
+ % (nhcube, self.nproc)
1450
+ )
1451
+ max_neval_hcube = max(self.max_neval_hcube, self.min_neval_hcube)
1452
+ ans = ans + (
1453
+ " evaluations/batch >= %.2g\n"
1454
+ % (float(self.min_neval_batch),)
1455
+ )
1456
+ ans = ans + (
1457
+ " %d <= evaluations/h-cube <= %.2g\n"
1458
+ % (int(self.min_neval_hcube), float(max_neval_hcube))
1459
+ )
1460
+ ans = ans + (
1461
+ " minimize_mem = %s adapt_to_errors = %s adapt = %s\n"
1462
+ % (str(self.minimize_mem), str(self.adapt_to_errors), str(self.adapt))
1463
+ )
1464
+ ans = ans + (" accuracy: relative = %g absolute = %g\n" % (self.rtol, self.atol))
1465
+ if not self.adapt:
1466
+ ans = ans + (
1467
+ " damping: alpha = %g beta= %g\n\n"
1468
+ % (0., 0.)
1469
+ )
1470
+ elif self.adapt_to_errors:
1471
+ ans = ans + (
1472
+ " damping: alpha = %g beta= %g\n\n"
1473
+ % (self.alpha, 0.)
1474
+ )
1475
+ else:
1476
+ ans = ans + (
1477
+ " damping: alpha = %g beta= %g\n\n"
1478
+ % (self.alpha, self.beta)
1479
+ )
1480
+
1481
+ # add integration limits
1482
+ offset = 4 * ' '
1483
+ entries = []
1484
+ axis = 0
1485
+ # self.limits = self.limits.buf.reshape(-1,2)
1486
+ limits = self.map.region()
1487
+ if self.xsample.shape is None:
1488
+ for k in self.xsample:
1489
+ if self.xsample[k].shape == ():
1490
+ entries.append((str(k), str(axis), str(limits[axis])))
1491
+ axis += 1
1492
+ else:
1493
+ prefix = str(k) + ' '
1494
+ for idx in numpy.ndindex(self.xsample[k].shape):
1495
+ str_idx = str(idx)[1:-1]
1496
+ str_idx = ''.join(str_idx.split(' '))
1497
+ if str_idx[-1] == ',':
1498
+ str_idx = str_idx[:-1]
1499
+ entries.append((prefix + str_idx, str(axis), str(limits[axis])))
1500
+ if prefix != '':
1501
+ prefix = '' # (len(str(k)) + 1) * ' '
1502
+ axis += 1
1503
+ linefmt = '{e0:>{w0}} {e1:>{w1}} {e2:>{w2}}'
1504
+ headers = ('key/index', 'axis', 'integration limits')
1505
+ w0 = max(len(ei[0]) for ei in entries)
1506
+ elif len(self.xsample.shape) > 1:
1507
+ for idx in numpy.ndindex(self.xsample.shape):
1508
+ str_idx = str(idx)[1:-1]
1509
+ str_idx = ''.join(str_idx.split(' '))
1510
+ if str_idx[-1] == ',':
1511
+ str_idx = str_idx[:-1]
1512
+ entries.append((str_idx, str(axis), str(limits[axis])))
1513
+ axis += 1
1514
+ linefmt = '{e0:>{w0}} {e1:>{w1}} {e2:>{w2}}'
1515
+ headers = ('key/index', 'axis', 'integration limits')
1516
+ w0 = max(len(ei[0]) for ei in entries)
1517
+ else:
1518
+ for axis,limits_axis in enumerate(limits):
1519
+ entries.append((None, str(axis), str(limits_axis)))
1520
+ linefmt = '{e1:>{w1}} {e2:>{w2}}'
1521
+ headers = (None, 'axis', 'integration limits')
1522
+ w0 = None
1523
+ w1 = max(len(ei[1]) for ei in entries)
1524
+ w2 = max(len(ei[2]) for ei in entries)
1525
+ ncol = 1 if self.map.dim <= 20 else 2
1526
+ table = ncol * [[]]
1527
+ nl = len(entries) // ncol
1528
+ if nl * ncol < len(entries):
1529
+ nl += 1
1530
+ ns = len(entries) - (ncol - 1) * nl
1531
+ ne = (ncol -1) * [nl] + [ns]
1532
+ iter_entries = iter(entries)
1533
+ for col in range(ncol):
1534
+ e0, e1, e2 = headers
1535
+ w0 = None if e0 is None else max(len(e0), w0)
1536
+ w1 = max(len(e1), w1)
1537
+ w2 = max(len(e2), w2)
1538
+ table[col] = [linefmt.format(e0=e0, w0=w0, e1=e1, w1=w1, e2=e2, w2=w2)]
1539
+ table[col].append(len(table[col][0]) * '-')
1540
+ for ii in range(ne[col]):
1541
+ e0, e1, e2 = next(iter_entries)
1542
+ table[col].append(linefmt.format(e0=e0, w0=w0, e1=e1, w1=w1, e2=e2, w2=w2))
1543
+ mtable = []
1544
+ ns += 2
1545
+ nl += 2
1546
+ for i in range(ns):
1547
+ mtable.append(' '.join([tabcol[i] for tabcol in table]))
1548
+ for i in range(ns, nl):
1549
+ mtable.append(' '.join([tabcol[i] for tabcol in table[:-1]]))
1550
+ ans += offset + ('\n' + offset).join(mtable) + '\n'
1551
+ # add grid data
1552
+ if ngrid > 0:
1553
+ ans += '\n' + self.map.settings(ngrid=ngrid)
1554
+ return ans
1555
+
1556
+ def _get_mpi_rank(self):
1557
+ try:
1558
+ import mpi4py.MPI
1559
+ return mpi4py.MPI.COMM_WORLD.Get_rank()
1560
+ except ImportError:
1561
+ return 0
1562
+
1563
+ mpi_rank = property(_get_mpi_rank, doc="MPI rank (>=0)")
1564
+
1565
+ def random_batch(
1566
+ Integrator self not None,
1567
+ bint yield_hcube=False,
1568
+ bint yield_y=False,
1569
+ # fcn = None,
1570
+ ):
1571
+ r""" Low-level batch iterator over integration points and weights.
1572
+
1573
+ This method creates an iterator that returns integration
1574
+ points from |vegas|, and their corresponding weights in an
1575
+ integral. The points are provided in arrays ``x[i, d]`` where
1576
+ ``i=0...`` labels the integration points in a batch
1577
+ and ``d=0...`` labels direction. The corresponding
1578
+ weights assigned by |vegas| to each point are provided
1579
+ in an array ``wgt[i]``.
1580
+
1581
+ Optionally the integrator will also return the indices of
1582
+ the hypercubes containing the integration points and/or the |y|-space
1583
+ coordinates of those points::
1584
+
1585
+ integ.random_batch() yields x, wgt
1586
+
1587
+ integ.random_batch(yield_hcube=True) yields x, wgt, hcube
1588
+
1589
+ integ.random_batch(yield_y=True) yields x, y, wgt
1590
+
1591
+ integ.random_batch(yield_hcube=True, yield_y=True) yields x, y, wgt, hcube
1592
+
1593
+ The number of integration points returned by the iterator
1594
+ corresponds to a single iteration. The number in a batch
1595
+ is controlled by parameter ``min_neval_batch``.
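+ 
+ A short sketch of typical use (illustrative; assumes ``f(x)``
+ returns one value per row of ``x``)::
+ 
+ integral = 0.0
+ for x, wgt in integ.random_batch():
+     integral += numpy.sum(wgt * f(x))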
1596
+ """
1597
+ for t in self._random_batch(yield_hcube, yield_y):
1598
+ yield tuple(numpy.array(ti) for ti in t)
1599
+
1600
+ def _random_batch(
1601
+ Integrator self not None,
1602
+ bint yield_hcube=False,
1603
+ bint yield_y=False,
1604
+ # fcn = None,
1605
+ ):
1606
+ r""" Underlying implementation of generator :meth:`Integrator.random_batch`.
1607
+
1608
+ Only difference from ``random_batch()`` is that the values for
1609
+ ``x``, ``y``, etc. are returned here as memoryviews into internal buffers
1610
+ that are overwritten by subsequent iterations. ``random_batch()`` returns
1611
+ copies of the views that are not overwritten. ``_random_batch()`` is used
1612
+ internally to minimize memory use and memory churn.
1613
+ """
1614
+ cdef Py_ssize_t nhcube = numpy.prod(self.nstrat)
1615
+ cdef double dv_y = 1. / nhcube
1616
+ # cdef Py_ssize_t min_neval_batch #= min(self.min_neval_batch, nhcube)
1617
+ cdef Py_ssize_t neval_batch # self.neval_batch
1618
+ cdef Py_ssize_t hcube_base
1619
+ cdef Py_ssize_t i_start, ihcube, i, d, tmp_hcube, hcube
1620
+ cdef Py_ssize_t[::1] hcube_array
1621
+ cdef double neval_sigf = (
1622
+ self.neval_frac * self.neval / self.sum_sigf
1623
+ if self.beta > 0 and self.sum_sigf > 0 and not self.adapt_to_errors
1624
+ else 0.0 # use min_neval_hcube (should never happen)
1625
+ )
1626
+ cdef Py_ssize_t avg_neval_hcube = int(self.neval / self.nhcube)
1627
+ cdef Py_ssize_t min_neval_batch = self.min_neval_batch # min_neval_batch * avg_neval_hcube ####
1628
+ cdef Py_ssize_t max_nhcube_batch = min_neval_batch // 2 + 1 ####
1629
+ cdef Py_ssize_t[::1] neval_hcube = self.neval_hcube
1630
+ cdef Py_ssize_t[::1] y0 = numpy.empty(self.dim, numpy.intp)
1631
+ cdef Py_ssize_t max_neval_hcube = max(
1632
+ self.max_neval_hcube, self.min_neval_hcube
1633
+ )
1634
+ cdef double[::1] sigf
1635
+ cdef double[:, ::1] yran
1636
+ cdef double[:, ::1] y
1637
+ cdef double[:, ::1] x
1638
+ cdef double[::1] jac
1639
+ cdef bint adaptive_strat = (self.beta > 0 and nhcube > 1 and not self.adapt_to_errors)
1640
+ ran_array_generator = (
1641
+ gvar.RNG.random
1642
+ if self.ran_array_generator is None else
1643
+ self.ran_array_generator
1644
+ )
1645
+ self.last_neval = 0
1646
+ self.neval_hcube_range = numpy.zeros(2, numpy.intp) + self.min_neval_hcube
1647
+ if yield_hcube:
1648
+ hcube_array = numpy.empty(self.y.shape[0], numpy.intp)
1649
+ # if adaptive_strat and self.minimize_mem and not self.adapt:
1650
+ ##### believe this was wrong idea; want to preserve adaptive strat if it exists
1651
+ # # can't minimize_mem without also adapting, so force beta=0
1652
+ # neval_sigf = 0.0
1653
+ neval_batch = 0
1654
+ hcube_base = 0
1655
+ sigf = self.sigf[hcube_base:hcube_base + max_nhcube_batch]
1656
+ for hcube in range(nhcube):
1657
+ ihcube = hcube - hcube_base
1658
+ # determine number of evaluations for h-cube
1659
+ if adaptive_strat:
1660
+ neval_hcube[ihcube] = <int> (sigf[ihcube] * neval_sigf) + self.min_neval_hcube
1661
+ if neval_hcube[ihcube] > max_neval_hcube:
1662
+ neval_hcube[ihcube] = max_neval_hcube
1663
+ if neval_hcube[ihcube] < self.neval_hcube_range[0]:
1664
+ self.neval_hcube_range[0] = neval_hcube[ihcube]
1665
+ elif neval_hcube[ihcube] > self.neval_hcube_range[1]:
1666
+ self.neval_hcube_range[1] = neval_hcube[ihcube]
1667
+ neval_batch += neval_hcube[ihcube]
1668
+ else:
1669
+ neval_hcube[ihcube] = avg_neval_hcube
1670
+ neval_batch += avg_neval_hcube
1671
+
1672
+ if neval_batch < min_neval_batch and hcube < nhcube - 1:
1673
+ # don't have enough points yet
1674
+ continue
1675
+
1676
+ ############################## have enough points => build yields
1677
+ self.last_neval += neval_batch
1678
+ nhcube_batch = hcube - hcube_base + 1
1679
+ if (3*self.dim + 3) * neval_batch * 2 > self.max_mem:
1680
+ raise MemoryError('work arrays larger than max_mem; reduce min_neval_batch or max_neval_hcube (or increase max_mem)')
1681
+
1682
+ # 1) resize work arrays if needed (to double what is needed)
1683
+ if neval_batch > self.y.shape[0]:
1684
+ self.y = numpy.empty((2 * neval_batch, self.dim), float)
1685
+ self.x = numpy.empty((2 * neval_batch, self.dim), float)
1686
+ self.jac = numpy.empty(2 * neval_batch, float)
1687
+ self.fdv2 = numpy.empty(2 * neval_batch, float)
1688
+ y = self.y
1689
+ x = self.x
1690
+ jac = self.jac
1691
+ if yield_hcube and neval_batch > hcube_array.shape[0]:
1692
+ hcube_array = numpy.empty(2 * neval_batch, numpy.intp)
1693
+
1694
+ # 2) generate random points
1695
+ yran = ran_array_generator((neval_batch, self.dim))
1696
+ i_start = 0
1697
+ for ihcube in range(nhcube_batch):
1698
+ tmp_hcube = hcube_base + ihcube
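+ # decode the flat hypercube index into per-axis stratification
+ # coordinates y0[d] (mixed-radix digits with radix nstrat[d])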
1699
+ for d in range(self.dim):
1700
+ y0[d] = tmp_hcube % self.nstrat[d]
1701
+ tmp_hcube = (tmp_hcube - y0[d]) // self.nstrat[d]
1702
+ for d in range(self.dim):
1703
+ for i in range(i_start, i_start + neval_hcube[ihcube]):
1704
+ y[i, d] = (y0[d] + yran[i, d]) / self.nstrat[d]
1705
+ i_start += neval_hcube[ihcube]
1706
+ self.map.map(y, x, jac, neval_batch)
1707
+
1708
+ # 3) compute weights and yield answers
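+ # each weight is the map Jacobian times the y-space volume dv_y of a
+ # hypercube, divided by the number of points sampled in that hypercube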
1709
+ i_start = 0
1710
+ for ihcube in range(nhcube_batch):
1711
+ for i in range(i_start, i_start + neval_hcube[ihcube]):
1712
+ jac[i] *= dv_y / neval_hcube[ihcube]
1713
+ if yield_hcube:
1714
+ hcube_array[i] = hcube_base + ihcube
1715
+ i_start += neval_hcube[ihcube]
1716
+ answer = (x[:neval_batch, :],)
1717
+ if yield_y:
1718
+ answer += (y[:neval_batch, :],)
1719
+ answer += (jac[:neval_batch],)
1720
+ if yield_hcube:
1721
+ answer += (hcube_array[:neval_batch],)
1722
+ yield answer
1723
+
1724
+ # reset parameters for main loop
1725
+ if hcube < nhcube - 1:
1726
+ neval_batch = 0
1727
+ hcube_base = hcube + 1
1728
+ sigf = self.sigf[hcube_base:hcube_base + max_nhcube_batch]
1729
+
1730
+ # old name --- for legacy code
1731
+ random_vec = random_batch
1732
+
1733
+ def random(
1734
+ Integrator self not None, bint yield_hcube=False, bint yield_y=False
1735
+ ):
1736
+ r""" Low-level iterator over integration points and weights.
1737
+
1738
+ This method creates an iterator that returns integration
1739
+ points from |vegas|, and their corresponding weights in an
1740
+ integral. Each point ``x[d]`` is accompanied by the weight
1741
+ assigned to that point by |vegas| when estimating an integral.
1742
+ Optionally it will also return the index of the hypercube
1743
+ containing the integration point and/or the |y|-space
1744
+ coordinates::
1745
+
1746
+ integ.random() yields x, wgt
1747
+
1748
+ integ.random(yield_hcube=True) yields x, wgt, hcube
1749
+
1750
+ integ.random(yield_y=True) yields x, y, wgt
1751
+
1752
+ integ.random(yield_hcube=True, yield_y=True) yields x, y, wgt, hcube
1753
+
1754
+ The number of integration points returned by the iterator
1755
+ corresponds to a single iteration.
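+ 
+ For example (illustrative; assumes a scalar integrand ``f``)::
+ 
+ integral = 0.0
+ for x, wgt in integ.random():
+     integral += wgt * f(x)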
1756
+ """
1757
+ cdef double[:, ::1] x
1758
+ cdef double[::1] wgt
1759
+ cdef Py_ssize_t[::1] hcube
1760
+ cdef double[:, ::1] y
1761
+ cdef Py_ssize_t i
1762
+ if yield_hcube and yield_y:
1763
+ for x, y, wgt, hcube in self.random_batch(yield_hcube=True, yield_y=True):
1764
+ for i in range(x.shape[0]):
1765
+ yield (x[i], y[i], wgt[i], hcube[i])
1766
+ elif yield_y:
1767
+ for x, y, wgt in self.random_batch(yield_y=True):
1768
+ for i in range(x.shape[0]):
1769
+ yield (x[i], y[i], wgt[i])
1770
+ elif yield_hcube:
1771
+ for x, wgt, hcube in self.random_batch(yield_hcube=True):
1772
+ for i in range(x.shape[0]):
1773
+ yield (x[i], wgt[i], hcube[i])
1774
+ else:
1775
+ for x, wgt in self.random_batch():
1776
+ for i in range(x.shape[0]):
1777
+ yield (x[i], wgt[i])
1778
+
1779
+ def sample(self, nbatch=None, mode='rbatch'):
1780
+ r""" Generate random sample of integration weights and points.
1781
+
1782
+ Given a :class:`vegas.Integrator` called ``integ``, the code ::
1783
+
1784
+ wgt, x = integ.sample(mode='lbatch')
1785
+
1786
+ generates a random array of integration points ``x`` and the
1787
+ array of corresponding weights ``wgt`` such that ::
1788
+
1789
+ r = sum(wgt * f(x))
1790
+
1791
+ is an estimate of the integral of the ``lbatch`` integrand ``f(x)``.
1792
+ Setting parameter ``mode='rbatch'`` formats ``x`` for use
1793
+ in ``rbatch`` integrands.
1794
+
1795
+ Parameter ``nbatch`` specifies the minimum number of integration
1796
+ points in the sample. The actual number is the smallest integer
1797
+ multiple of ``integ.last_neval`` that is equal to or larger than
1798
+ ``nbatch``.
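+ 
+ For example (illustrative; assumes ``f`` is an ``lbatch``
+ integrand returning one value per integration point)::
+ 
+ wgt, x = integ.sample(nbatch=10000, mode='lbatch')
+ estimate = numpy.sum(wgt * f(x))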
1799
+ """
1800
+ neval = self.last_neval if self.last_neval > 0 else self.neval
1801
+ nbatch = neval if nbatch is None else int(nbatch)
1802
+ nit = nbatch // neval
1803
+ if nit * neval < nbatch:
1804
+ nit += 1
1805
+ samples = []
1806
+ wgts = []
1807
+ for _ in range(nit):
1808
+ for x, w in self.random_batch():
1809
+ samples.append(numpy.array(x))
1810
+ wgts.append(numpy.array(w))
1811
+ samples = numpy.concatenate(samples, axis=0)
1812
+ wgts = numpy.concatenate(wgts) / nit
1813
+ # need to fix following to allow other formats for x
1814
+ if self.xsample.shape is None:
1815
+ if mode == 'rbatch':
1816
+ samples = gvar.BufferDict(self.xsample, rbatch_buf=samples.T)
1817
+ else:
1818
+ samples = gvar.BufferDict(self.xsample, lbatch_buf=samples)
1819
+ else:
1820
+ if self.xsample.shape != ():
1821
+ if mode == 'rbatch':
1822
+ samples = samples.T
1823
+ samples.shape = self.xsample.shape + (-1,)
1824
+ else:
1825
+ samples.shape = (-1,) + self.xsample.shape
1826
+ return wgts, samples
1827
+
1828
+
1829
+ @staticmethod
1830
+ def synchronize_random():
1831
+ try:
1832
+ import mpi4py.MPI
1833
+ except ImportError:
1834
+ return
1835
+ comm = mpi4py.MPI.COMM_WORLD
1836
+ rank = comm.Get_rank()
1837
+ mpi_nproc = comm.Get_size()
1838
+ if mpi_nproc > 1:
1839
+ # synchronize random numbers
1840
+ if rank == 0:
1841
+ seed = gvar.ranseed(size=10)
1842
+ # seed = tuple(
1843
+ # gvar.randint(1, min(2**30, sys.maxsize), size=5)
1844
+ # )
1845
+ else:
1846
+ seed = None
1847
+ seed = comm.bcast(seed, root=0)
1848
+ gvar.ranseed(seed)
1849
+
1850
+ def _make_std_integrand(self, fcn, xsample=None):
1851
+ r""" Convert integrand ``fcn`` into an lbatch integrand.
1852
+
1853
+ Returns an object ``vi`` of type :class:`VegasIntegrand`.
1854
+ This object converts an arbitrary integrand ``fcn`` (``lbatch``, ``rbatch``,
1855
+ and non-batch, with or without dictionaries for input or output)
1856
+ into a standard form: an lbatch integrand whose output is a
1857
+ 2-d lbatch array.
1858
+
1859
+ This is useful when building integrands that call other
1860
+ functions of the parameters. The latter are converted to
1861
+ lbatch integrands irrespective of what they were
1862
+ originally. This standardizes them, making it straightforward
1863
+ to build them into a new integrand.
1864
+ """
1865
+ if isinstance(fcn, VegasIntegrand):
1866
+ return fcn
1867
+ return VegasIntegrand(
1868
+ fcn=fcn,
1869
+ map=self.map,
1870
+ uses_jac=self.uses_jac,
1871
+ xsample=self.xsample if xsample is None else xsample,
1872
+ mpi=False if self.nproc > 1 else self.mpi
1873
+ )
1874
+
1875
+ def __call__(Integrator self not None, fcn, save=None, saveall=None, **kargs):
1876
+ r""" Integrate integrand ``fcn``.
1877
+
1878
+ A typical integrand has the form, for example::
1879
+
1880
+ def f(x):
1881
+ return x[0] ** 2 + x[1] ** 4
1882
+
1883
+ The argument ``x[d]`` is an integration point, where
1884
+ index ``d=0...`` represents direction within the
1885
+ integration volume.
1886
+
1887
+ Integrands can be array-valued, representing multiple
1888
+ integrands: e.g., ::
1889
+
1890
+ def f(x):
1891
+ return [x[0] ** 2, x[0] / x[1]]
1892
+
1893
+ The return arrays can have any shape. Dictionary-valued
1894
+ integrands are also supported: e.g., ::
1895
+
1896
+ def f(x):
1897
+ return dict(a=x[0] ** 2, b=[x[0] / x[1], x[1] / x[0]])
1898
+
1899
+ Integrand functions that return arrays or dictionaries
1900
+ are useful for multiple integrands that are closely related,
1901
+ and can lead to substantial reductions in the errors for
1902
+ ratios or differences of the results.
1903
+
1904
+ Integrands take dictionaries as arguments when
1905
+ :class:`Integrator` keyword ``map`` is
1906
+ set equal to a dictionary. For example, with ::
1907
+
1908
+ map = dict(r=(0,1), theta=(0, np.pi), phi=(0, 2*np.pi))
1909
+
1910
+ the volume of a unit sphere is obtained by integrating ::
1911
+
1912
+ def f(xd):
1913
+ r = xd['r']
1914
+ theta = xd['theta']
1915
+ return r ** 2 * np.sin(theta)
1916
+
1917
+ It is usually much faster to use |vegas| in batch
1918
+ mode, where integration points are presented to the
1919
+ integrand in batches. A simple batch integrand might
1920
+ be, for example::
1921
+
1922
+ @vegas.lbatchintegrand
1923
+ def f(x):
1924
+ return x[:, 0] ** 2 + x[:, 1] ** 4
1925
+
1926
+ where decorator ``@vegas.lbatchintegrand`` tells
1927
+ |vegas| that the integrand processes integration
1928
+ points in batches. The array ``x[i, d]``
1929
+ represents a collection of different integration
1930
+ points labeled by ``i=0...``. (The number of points is controlled by the
1931
+ |Integrator| parameter ``min_neval_batch``.)
1932
+
1933
+ Batch mode is particularly useful (and fast) when the integrand
1934
+ is coded in Cython. Then loops over the integration points
1935
+ can be coded explicitly, avoiding the need to use
1936
+ :mod:`numpy`'s whole-array operators if they are not
1937
+ well suited to the integrand.
1938
+
1939
+ The batch index is always first (leftmost) for lbatch
1940
+ integrands, as above. It is also possible to create batch
1941
+ integrands where the batch index is the last (rightmost)
1942
+ index: for example, ::
1943
+
1944
+ @vegas.rbatchintegrand
1945
+ def f(x):
1946
+ return x[0, :] ** 2 + x[1, :] ** 4
1947
+
1948
+ Batch integrands can also be constructed from classes
1949
+ derived from :class:`vegas.LBatchIntegrand` or
1950
+ :class:`vegas.RBatchIntegrand`.
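+ 
+ A minimal class-based sketch (illustrative; the class name
+ ``BatchF`` is hypothetical)::
+ 
+ class BatchF(vegas.RBatchIntegrand):
+     def __call__(self, x):
+         return x[0] ** 2 + x[1] ** 4
+ 
+ f = BatchF()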
1951
+
1952
+ Any |vegas| parameter can also be reset: e.g.,
1953
+ ``self(fcn, nitn=20, neval=1e6)``.
1954
+
1955
+ Args:
1956
+ fcn (callable): Integrand function.
1957
+ save (str or file or None): Writes ``results`` into pickle file specified
1958
+ by ``save`` at the end of each iteration. For example, setting
1959
+ ``save='results.pkl'`` means that the results returned by the last
1960
+ vegas iteration can be reconstructed later using::
1961
+
1962
+ import pickle
1963
+ with open('results.pkl', 'rb') as ifile:
1964
+ results = pickle.load(ifile)
1965
+
1966
+ Ignored if ``save=None`` (default).
1967
+ saveall (str or file or None): Writes ``(results, integrator)`` into pickle
1968
+ file specified by ``saveall`` at the end of each iteration. For example,
1969
+ setting ``saveall='allresults.pkl'`` means that the results returned by
1970
+ the last vegas iteration, together with a clone of the (adapted) integrator,
1971
+ can be reconstructed later using::
1972
+
1973
+ import pickle
1974
+ with open('allresults.pkl', 'rb') as ifile:
1975
+ results, integrator = pickle.load(ifile)
1976
+
1977
+ Ignored if ``saveall=None`` (default).
1978
+
1979
+ Returns:
1980
+ Monte Carlo estimate of the integral of ``fcn(x)`` as
1981
+ an object of type :class:`vegas.RAvg`,
1982
+ :class:`vegas.RAvgArray`, or :class:`vegas.RAvgDict`.
1983
+ """
1984
+ cdef double[:, ::1] x
1985
+ # cdef double[:, ::1] jac
1986
+ cdef double[::1] wgt
1987
+ cdef Py_ssize_t[::1] hcube
1988
+
1989
+ cdef double[::1] sigf
1990
+ cdef double[:, ::1] y
1991
+ cdef double[::1] fdv2
1992
+ cdef double[:, ::1] fx
1993
+ cdef double[::1] dwf
1994
+ cdef double[::1] sum_wf
1995
+ cdef double[::1] sum_dwf
1996
+ cdef double[:, ::1] sum_dwf2
1997
+ cdef double[::1] mean = numpy.empty(1, float)
1998
+ cdef double[:, ::1] var = numpy.empty((1, 1), float)
1999
+ cdef Py_ssize_t itn, i, j, jtmp, s, t, neval, fcn_size, len_hcube
2000
+ cdef bint adaptive_strat
2001
+ cdef double sum_sigf, sigf2
2002
+ cdef bint firsteval = True
2003
+
2004
+ if kargs:
2005
+ self.set(kargs)
2006
+ if self.nproc > 1:
2007
+ old_defaults = self.set(mpi=False, min_neval_batch=self.nproc * self.min_neval_batch)
2008
+ elif self.mpi:
2009
+ pass
2010
+
2011
+ adaptive_strat = (
2012
+ self.beta > 0 and self.nhcube > 1
2013
+ and self.adapt and not self.adapt_to_errors
2014
+ )
2015
+
2016
+ # synchronize random numbers across all processes (mpi)
2017
+ if self.sync_ran and self.mpi:
2018
+ self.synchronize_random()
2019
+
2020
+ # Put integrand into standard form
2021
+ fcn = self._make_std_integrand(fcn)
2022
+ # fcn = VegasIntegrand(
2023
+ # fcn, map=self.map, uses_jac=self.uses_jac, xsample=self.xsample,
2024
+ # mpi=False if self.nproc > 1 else self.mpi
2025
+ # )
2026
+ fcn_size = fcn.size
2027
+
2028
+ # allocate work arrays
2029
+ dwf = numpy.empty(fcn_size, float)
2030
+ sum_wf = numpy.empty(fcn_size, float)
2031
+ sum_dwf = numpy.empty(fcn_size, float)
2032
+ sum_dwf2 = numpy.empty((fcn_size, fcn_size), float)
2033
+ mean = numpy.empty(fcn_size, float)
2034
+ var = numpy.empty((fcn_size, fcn_size), float)
2035
+ mean[:] = 0.0
2036
+ var[:, :] = 0.0
2037
+ result = VegasResult(fcn, weighted=self.adapt)
2038
+
2039
+ for itn in range(self.nitn):
2040
+ if self.analyzer is not None:
2041
+ self.analyzer.begin(itn, self)
2042
+
2043
+ # initialize arrays that accumulate results for a single iteration
2044
+ mean[:] = 0.0
2045
+ var[:, :] = 0.0
2046
+ sum_sigf = 0.0
2047
+
2048
+ # iterate batch-slices of integration points
2049
+ for x, y, wgt, hcube in self._random_batch(
2050
+ yield_hcube=True, yield_y=True, #fcn=fcn
2051
+ ):
2052
+ fdv2 = self.fdv2 # must be inside loop
2053
+ len_hcube = len(hcube)
2054
+
2055
+ # evaluate integrand at all points in x
2056
+ xa = numpy.asarray(x)
2057
+ if self.nproc > 1:
2058
+ nx = x.shape[0] // self.nproc + 1
2059
+ if self.uses_jac:
2060
+ jac1d = self.map.jac1d(y)
2061
+ results = self.pool.starmap(
2062
+ fcn.eval,
2063
+ [(xa[i*nx : (i+1)*nx], jac1d[i*nx : (i+1)*nx]) for i in range(self.nproc) if i*nx < xa.shape[0]],
2064
+ 1,
2065
+ )
2066
+ else:
2067
+ results = self.pool.starmap(
2068
+ fcn.eval,
2069
+ [(xa[i*nx : (i+1)*nx], None) for i in range(self.nproc) if i*nx < xa.shape[0]],
2070
+ 1,
2071
+ )
2072
+ fx = numpy.concatenate(results, axis=0, dtype=float)
2073
+ else:
2074
+ # fx = fcn.eval(x, jac=self.map.jac1d(y) if self.uses_jac else None)
2075
+ fx = numpy.asarray(
2076
+ fcn.eval(xa, jac=self.map.jac1d(y) if self.uses_jac else None),
2077
+ dtype=float
2078
+ )
2079
+ # sanity check
2080
+ if numpy.any(numpy.isnan(fx)):
2081
+ raise ValueError('integrand evaluates to nan')
2082
+
2083
+ # compute integral and variance for each h-cube
2084
+ # j is index of point within batch, i is hcube index
2085
+ j = 0
2086
+ sigf = self.sigf[hcube[0]:hcube[-1] + 1]
2087
+ for i in range(hcube[0], hcube[-1] + 1):
2088
+ # iterate over h-cubes
2089
+ sum_wf[:] = 0.0
2090
+ sum_dwf[:] = 0.0
2091
+ sum_dwf2[:, :] = 0.0
2092
+ neval = 0
2093
+ jtmp = j
2094
+ while jtmp < len_hcube and hcube[jtmp] == i:
2095
+ # iterate over points in hypercube for mean and neval
2096
+ for s in range(fcn_size):
2097
+ sum_wf[s] += wgt[jtmp] * fx[jtmp, s]
2098
+ jtmp += 1
2099
+ neval += 1
2100
+ while j < len_hcube and hcube[j] == i:
2101
+ # iterate over points in hypercube for variances
2102
+ for s in range(fcn_size):
2103
+ dwf[s] = wgt[j] * fx[j, s] - sum_wf[s] / neval
2104
+ if abs(dwf[s]) < EPSILON * abs(sum_wf[s] / neval):
2105
+ dwf[s] = EPSILON * abs(sum_wf[s] / neval)
2106
+ sum_dwf2[s, s] += dwf[s] ** 2
2107
+ dwf[s] = 0. # kills off-diagonal covariances
2108
+ else:
2109
+ sum_dwf2[s, s] += dwf[s] ** 2
2110
+ sum_dwf[s] += dwf[s] # doesn't contribute if round-off
2111
+ for t in range(s):
2112
+ sum_dwf2[s, t] += dwf[s] * dwf[t]
2113
+ fdv2[j] = (wgt[j] * fx[j, 0] * neval) ** 2
2114
+ j += 1
2115
+ for s in range(fcn_size):
2116
+ # include Neely corrections (makes very little difference)
2117
+ mean[s] += sum_wf[s] + sum_dwf[s]
2118
+ for t in range(s + 1):
2119
+ var[s, t] += (neval * sum_dwf2[s, t] - sum_dwf[s] * sum_dwf[t]) / (neval - 1.)
2120
+ sigf2 = abs((neval * sum_dwf2[0, 0] - sum_dwf[0] * sum_dwf[0]) / (neval - 1.))
2121
+ if adaptive_strat:
2122
+ sigf[i - hcube[0]] = sigf2 ** (self.beta / 2.)
2123
+ sum_sigf += sigf[i - hcube[0]]
2124
+ if self.adapt_to_errors and self.adapt:
2125
+ # replace fdv2 with variance
2126
+ # only one piece of data (from current hcube)
2127
+ fdv2[j - 1] = sigf2
2128
+ self.map.add_training_data(
2129
+ y[j - 1:, :], fdv2[j - 1:], 1
2130
+ )
2131
+ if self.minimize_mem:
2132
+ self.sigf[hcube[0]:hcube[-1] + 1] = sigf[:]
2133
+ if (not self.adapt_to_errors) and self.adapt and self.alpha > 0:
2134
+ self.map.add_training_data(y, fdv2, y.shape[0])
2135
+
2136
+ for s in range(var.shape[0]):
2137
+ for t in range(s):
2138
+ var[t, s] = var[s, t]
2139
+
2140
+ # accumulate result from this iteration
2141
+ result.update(mean, var, self.last_neval)
2142
+
2143
+ if self.beta > 0 and not self.adapt_to_errors and self.adapt:
2144
+ if sum_sigf > 0:
2145
+ self.sum_sigf = sum_sigf
2146
+ else:
2147
+ # integrand appears to be a constant => even distribution of points
2148
+ self.sigf[:] = 1.
2149
+ self.sum_sigf = len(self.sigf)
2150
+ if self.alpha > 0 and self.adapt:
2151
+ self.map.adapt(alpha=self.alpha)
2152
+ if self.analyzer is not None:
2153
+ result.update_analyzer(self.analyzer)
2154
+
2155
+ if save is not None:
2156
+ result.save(save)
2157
+ if saveall is not None:
2158
+ result.saveall(self, saveall)
2159
+
2160
+ if result.converged(self.rtol, self.atol):
2161
+ break
2162
+ if self.nproc > 1:
2163
+ self.set(old_defaults)
2164
+ return result.result
2165
+
2166
+ class reporter:
2167
+ r""" Analyzer class that prints out a report, iteration
2168
+ by iteration, on how vegas is doing. Parameter ngrid
2169
+ specifies how many x[i]'s to print out from the maps
2170
+ for each axis.
2171
+
2172
+ Args:
2173
+ ngrid (int): Number of grid nodes printed out for
2174
+ each direction. Default is 0.
2175
+ """
2176
+ def __init__(self, ngrid=0):
2177
+ self.ngrid = ngrid
2178
+ self.clock = time.perf_counter if hasattr(time, 'perf_counter') else time.time
2179
+ # self.clock = time.time
2180
+
2181
+ def begin(self, itn, integrator):
2182
+ self.integrator = integrator
2183
+ self.itn = itn
2184
+ self.t0 = self.clock()
2185
+ if itn==0:
2186
+ print(integrator.settings())
2187
+ sys.stdout.flush()
2188
+
2189
+ def end(self, itn_ans, ans):
2190
+ print(" itn %2d: %s\n all itn's: %s"%(self.itn+1, itn_ans, ans))
2191
+ print(
2192
+ ' neval = %s neval/h-cube = %s\n chi2/dof = %.2f Q = %.2f time = %.2f'
2193
+ % (
2194
+ format(self.integrator.last_neval, '.6g'),
2195
+ tuple(self.integrator.neval_hcube_range),
2196
+ ans.chi2 / ans.dof if ans.dof > 0 else 0,
2197
+ ans.Q if ans.dof > 0 else 1.,
2198
+ self.clock() - self.t0
2199
+ )
2200
+ )
2201
+ print(self.integrator.map.settings(ngrid=self.ngrid))
2202
+ print('')
2203
+ sys.stdout.flush()
2204
+
2205
+ # Objects for accumulating the results from multiple iterations of vegas.
2206
+ # Results can be scalars (RAvg), arrays (RAvgArray), or dictionaries (RAvgDict).
2207
+ # Each stores results from each iteration, as well as a weighted (running)
2208
+ # average of the results of all iterations (unless parameter weighted=False,
2209
+ # in which case the average is unweighted).
2210
+ class RAvg(gvar.GVar):
2211
+ r""" Running average of scalar-valued Monte Carlo estimates.
2212
+
2213
+ This class accumulates independent Monte Carlo
2214
+ estimates (e.g., of an integral) and combines
2215
+ them into a single average. It
2216
+ is derived from :class:`gvar.GVar` (from
2217
+ the :mod:`gvar` module if it is present) and
2218
+ represents a Gaussian random variable.
2219
+
2220
+ Different estimates are weighted by their
2221
+ inverse variances if parameter ``weighted=True``;
2222
+ otherwise straight, unweighted averages are used.
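+ 
+ Concretely, the weighted average of estimates ``m_i`` with
+ standard deviations ``s_i`` is computed as::
+ 
+ w_i = 1 / s_i ** 2
+ mean = sum(w_i * m_i) / sum(w_i)
+ sdev = 1 / sqrt(sum(w_i))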
2223
+ """
2224
+ def __init__(self, weighted=True, itn_results=None, sum_neval=0, _rescale=True):
2225
+ # rescale not used here
2226
+ self.rescale = None
2227
+ if weighted:
2228
+ self._wlist = []
2229
+ self.weighted = True
2230
+ else:
2231
+ self._msum = 0.
2232
+ self._varsum = 0.
2233
+ self._n = 0
2234
+ self.weighted = False
2235
+ self._mlist = []
2236
+ self.itn_results = []
2237
+ if itn_results is None:
2238
+ super(RAvg, self).__init__(
2239
+ *gvar.gvar(0., 0.).internaldata,
2240
+ )
2241
+ else:
2242
+ if isinstance(itn_results, bytes):
2243
+ itn_results = gvar.loads(itn_results)
2244
+ for r in itn_results:
2245
+ self.add(r)
2246
+ self.sum_neval = sum_neval
2247
+
2248
+ def extend(self, ravg):
2249
+ r""" Merge results from :class:`RAvg` object ``ravg`` after results currently in ``self``. """
2250
+ for r in ravg.itn_results:
2251
+ self.add(r)
2252
+ self.sum_neval += ravg.sum_neval
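+ # For example (illustrative): after r1 = integ(f, nitn=5) and
+ # r2 = integ(f, nitn=5), r1.extend(r2) makes r1 the average over
+ # all 10 iterations.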
2253
+
2254
+ def __reduce_ex__(self, protocol):
2255
+ return (
2256
+ RAvg,
2257
+ (self.weighted, gvar.dumps(self.itn_results, protocol=protocol), self.sum_neval)
2258
+ )
2259
+
2260
+ def _remove_gvars(self, gvlist):
2261
+ tmp = RAvg(
2262
+ weighted=self.weighted,
2263
+ itn_results=self.itn_results,
2264
+ sum_neval=self.sum_neval,
2265
+ )
2266
+ tmp.itn_results = gvar.remove_gvars(tmp.itn_results, gvlist)
2267
+ tgvar = gvar.gvar_factory() # small cov matrix
2268
+ super(RAvg, tmp).__init__(*tgvar(0,0).internaldata)
2269
+ return tmp
2270
+
2271
+ def _distribute_gvars(self, gvlist):
2272
+ return RAvg(
2273
+ weighted=self.weighted,
2274
+ itn_results = gvar.distribute_gvars(self.itn_results, gvlist),
2275
+ sum_neval=self.sum_neval,
2276
+ )
2277
+
2278
+ def _chi2(self):
2279
+ if len(self.itn_results) <= 1:
2280
+ return 0.0
2281
+ if self.weighted:
2282
+ wavg = self.mean
2283
+ ans = 0.0
2284
+ for m, w in zip(self._mlist, self._wlist):
2285
+ ans += (wavg - m) ** 2 * w
2286
+ return ans
2287
+ else:
2288
+ wavg = self.mean
2289
+ ans = numpy.sum([(m - wavg) ** 2 for m in self._mlist]) / (self._varsum / self._n)
2290
+ return ans
2291
+ chi2 = property(_chi2, None, None, "*chi**2* of weighted average.")
2292
+
2293
+ def _dof(self):
2294
+ return len(self.itn_results) - 1
2295
+ dof = property(
2296
+ _dof,
2297
+ None,
2298
+ None,
2299
+ "Number of degrees of freedom in weighted average."
2300
+ )
2301
+
2302
+ def _nitn(self):
2303
+ return len(self.itn_results)
2304
+ nitn = property(_nitn, None, None, "Number of iterations.")
2305
+
2306
+ def _Q(self):
2307
+ return (
2308
+ gvar.gammaQ(self.dof / 2., self.chi2 / 2.)
2309
+ if self.dof > 0 and self.chi2 >= 0
2310
+ else float('nan')
2311
+ )
2312
+ Q = property(
2313
+ _Q,
2314
+ None,
2315
+ None,
2316
+ "*Q* or *p-value* of weighted average's *chi**2*.",
2317
+ )
2318
+
2319
+ def _avg_neval(self):
2320
+ return self.sum_neval / self.nitn if self.nitn > 0 else 0
2321
+ avg_neval = property(_avg_neval, None, None, "Average number of integrand evaluations per iteration.")
2322
+
2323
+ def converged(self, rtol, atol):
2324
+ return self.sdev < atol + rtol * abs(self.mean)
2325
+
2326
+ def add(self, g):
2327
+ r""" Add estimate ``g`` to the running average. """
2328
+ self.itn_results.append(g)
2329
+ if isinstance(g, gvar.GVarRef):
2330
+ return
2331
+ self._mlist.append(g.mean)
2332
+ if self.weighted:
2333
+ self._wlist.append(1 / (g.var if g.var > TINY else TINY))
2334
+ var = 1. / numpy.sum(self._wlist)
2335
+ sdev = numpy.sqrt(var)
2336
+ mean = numpy.sum([w * m for w, m in zip(self._wlist, self._mlist)]) * var
2337
+ super(RAvg, self).__init__(*gvar.gvar(mean, sdev).internaldata)
2338
+ else:
2339
+ self._msum += g.mean
2340
+ self._varsum += g.var #if g.var > TINY else TINY
2341
+ self._n += 1
2342
+ mean = self._msum / self._n
2343
+ var = self._varsum / self._n ** 2
2344
+ super(RAvg, self).__init__(*gvar.gvar(mean, numpy.sqrt(var)).internaldata)
2345
+
2346
+ def summary(self, extended=False, weighted=None):
2347
+ r""" Assemble summary of results, iteration-by-iteration, into a string.
2348
+
2349
+ Args:
2350
+ weighted (bool): Display weighted averages of results from different
2351
+ iterations if ``True``; otherwise show unweighted averages.
2352
+ Default behavior is determined by |vegas|.
2353
+ """
2354
+ if weighted is None:
2355
+ weighted = self.weighted
2356
+ acc = RAvg(weighted=weighted)
2357
+ linedata = []
2358
+ for i, res in enumerate(self.itn_results):
2359
+ acc.add(res)
2360
+ if i > 0:
2361
+ chi2_dof = acc.chi2 / acc.dof
2362
+ Q = acc.Q
2363
+ else:
2364
+ chi2_dof = 0.0
2365
+ Q = 1.0
2366
+ itn = '%3d' % (i + 1)
2367
+ integral = '%-15s' % res
2368
+ wgtavg = '%-15s' % acc
2369
+ chi2dof = '%8.2f' % (acc.chi2 / acc.dof if i != 0 else 0.0)
2370
+ Q = '%8.2f' % (acc.Q if i != 0 else 1.0)
2371
+ linedata.append((itn, integral, wgtavg, chi2dof, Q))
2372
+ nchar = 5 * [0]
2373
+ for data in linedata:
2374
+ for i, d in enumerate(data):
2375
+ if len(d) > nchar[i]:
2376
+ nchar[i] = len(d)
2377
+ fmt = '%%%ds %%-%ds %%-%ds %%%ds %%%ds\n' % tuple(nchar)
2378
+ if weighted:
2379
+ ans = fmt % ('itn', 'integral', 'wgt average', 'chi2/dof', 'Q')
2380
+ else:
2381
+ ans = fmt % ('itn', 'integral', 'average', 'chi2/dof', 'Q')
2382
+ ans += len(ans[:-1]) * '-' + '\n'
2383
+ for data in linedata:
2384
+ ans += fmt % data
2385
+ return ans
2386
+
2387
+ class RAvgDict(gvar.BufferDict):
2388
+ r""" Running average of dictionary-valued Monte Carlo estimates.
2389
+
2390
+ This class accumulates independent dictionaries of Monte Carlo
2391
+ estimates (e.g., of an integral) and combines
2392
+ them into a dictionary of averages. It
2393
+ is derived from :class:`gvar.BufferDict`. The dictionary
2394
+ values are :class:`gvar.GVar`\s or arrays of :class:`gvar.GVar`\s.
2395
+
2396
+ Different estimates are weighted by their
2397
+ inverse covariance matrices if parameter ``weighted=True``;
2398
+ otherwise straight, unweighted averages are used.
2399
+ """
2400
+ def __init__(self, dictionary=None, weighted=True, itn_results=None, sum_neval=0, rescale=True):
2401
+ if dictionary is None and (itn_results is None or len(itn_results) < 1):
2402
+ raise ValueError('must specify dictionary or itn_results')
2403
+ super(RAvgDict, self).__init__(dictionary if dictionary is not None else itn_results[0])
2404
+ self.rarray = RAvgArray(shape=(self.size,), weighted=weighted, rescale=rescale)
2405
+ self.buf = numpy.asarray(self.rarray) # turns it into a normal ndarray
2406
+ self.itn_results = []
2407
+ self.weighted = weighted
2408
+ if itn_results is not None:
2409
+ if isinstance(itn_results, bytes):
2410
+ itn_results = gvar.loads(itn_results)
2411
+ for r in itn_results:
2412
+ self.add(r)
2413
+ self.sum_neval = sum_neval
2414
+
2415
+ def extend(self, ravg):
2416
+ r""" Merge results from :class:`RAvgDict` object ``ravg`` after results currently in ``self``. """
2417
+ for r in ravg.itn_results:
2418
+ self.add(r)
2419
+ self.sum_neval += ravg.sum_neval
2420
+
2421
+ def __reduce_ex__(self, protocol):
2422
+ return (
2423
+ RAvgDict,
2424
+ (super(RAvgDict, self), self.weighted, gvar.dumps(self.itn_results, protocol=protocol), self.sum_neval, self.rescale),
2425
+ )
2426
+
2427
+ def _remove_gvars(self, gvlist):
2428
+ tmp = RAvgDict(
2429
+ weighted=self.weighted,
2430
+ itn_results=[gvar.BufferDict(x) for x in self.itn_results],
2431
+ sum_neval=self.sum_neval,
2432
+ rescale=self.rescale,
2433
+ )
2434
+ tmp.rarray = gvar.remove_gvars(tmp.rarray, gvlist)
2435
+ tmp._buf = gvar.remove_gvars(tmp.buf, gvlist)
2436
+ return tmp
2437
+
2438
+ def _distribute_gvars(self, gvlist):
2439
+ self.rarray = gvar.distribute_gvars(self.rarray, gvlist)
2440
+ self._buf = gvar.distribute_gvars(self.buf, gvlist)
2441
+ return self
2442
+
2443
+ def converged(self, rtol, atol):
2444
+ return numpy.all(
2445
+ gvar.sdev(self.buf) <
2446
+ atol + rtol * numpy.abs(gvar.mean(self.buf))
2447
+ )
2448
+
2449
+ def add(self, g):
2450
+ if isinstance(g, gvar.BufferDict):
2451
+ newg = gvar.BufferDict(g)
2452
+ else:
2453
+ newg = gvar.BufferDict()
2454
+ for k in self:
2455
+ try:
2456
+ newg[k] = g[k]
2457
+ except AttributeError:
2458
+ raise ValueError(
2459
+ "Dictionary g doesn't contain key " + str(k) + '.'
2460
+ )
2461
+ self.itn_results.append(newg)
2462
+ self.rarray.add(newg.buf)
2463
+
2464
+ def summary(self, extended=False, weighted=None, rescale=None):
2465
+ r""" Assemble summary of results, iteration-by-iteration, into a string.
2466
+
2467
+ Args:
2468
+ extended (bool): Include a table of final averages for every
2469
+ component of the integrand if ``True``. Default is ``False``.
2470
+ weighted (bool): Display weighted averages of results from different
2471
+ iterations if ``True``; otherwise show unweighted averages.
2472
+ Default behavior is determined by |vegas|.
2473
+ """
2474
+ if weighted is None:
2475
+ weighted = self.weighted
2476
+ if rescale is None:
2477
+ rescale = self.rarray.rescale
2478
+ ans = self.rarray.summary(weighted=weighted, extended=False, rescale=rescale)
2479
+ if extended and self.itn_results[0].size > 1:
2480
+ ans += '\n' + gvar.tabulate(self) + '\n'
2481
+ return ans
2482
+
2483
+ def _chi2(self):
2484
+ return self.rarray.chi2
2485
+ chi2 = property(_chi2, None, None, "*chi**2* of weighted average.")
2486
+
2487
+ def _dof(self):
2488
+ return self.rarray.dof
2489
+ dof = property(
2490
+ _dof, None, None,
2491
+ "Number of degrees of freedom in weighted average."
2492
+ )
2493
+
2494
+ def _nitn(self):
2495
+ return len(self.itn_results)
2496
+ nitn = property(_nitn, None, None, "Number of iterations.")
2497
+
2498
+ def _Q(self):
2499
+ return self.rarray.Q
2500
+ Q = property(
2501
+ _Q, None, None,
2502
+ "*Q* or *p-value* of weighted average's *chi**2*.",
2503
+ )
2504
+
2505
+ def _avg_neval(self):
2506
+ return self.sum_neval / self.nitn if self.nitn > 0 else 0
2507
+ avg_neval = property(_avg_neval, None, None, "Average number of integrand evaluations per iteration.")
2508
+
2509
+ def _get_rescale(self):
2510
+ return self.rarray.rescale
2511
+ rescale = property(_get_rescale, None, None, "Integrals divided by ``rescale`` before doing weighted averages.")
2512
+
2513
+ class RAvgArray(numpy.ndarray):
2514
+ r""" Running average of array-valued Monte Carlo estimates.
2515
+
2516
+ This class accumulates independent arrays of Monte Carlo
2517
+ estimates (e.g., of an integral) and combines
2518
+ them into an array of averages. It
2519
+ is derived from :class:`numpy.ndarray`. The array
2520
+ elements are :class:`gvar.GVar`\s (from the ``gvar`` module if
2521
+ present) and represent Gaussian random variables.
2522
+
2523
+ Different estimates are weighted by their
2524
+ inverse covariance matrices if parameter ``weighted=True``;
2525
+ otherwise straight, unweighted averages are used.
2526
+ """
2527
+ def __new__(
2528
+ subtype, shape=None,
2529
+ dtype=object, buffer=None, offset=0, strides=None, order=None,
2530
+ weighted=True, itn_results=None, sum_neval=0, rescale=True
2531
+ ):
2532
+ if shape is None and (itn_results is None or len(itn_results) < 1):
2533
+ raise ValueError('must specify shape or itn_results')
2534
+ obj = numpy.ndarray.__new__(
2535
+ subtype, shape=shape if shape is not None else numpy.shape(itn_results[0]),
2536
+ dtype=object, buffer=buffer, offset=offset,
2537
+ strides=strides, order=order
2538
+ )
2539
+ if buffer is None:
2540
+ obj.flat = numpy.array(obj.size * [gvar.gvar(0,0)])
2541
+ obj.itn_results = []
2542
+ obj._mlist = []
2543
+ if rescale is False or rescale is None or not weighted:
2544
+ obj.rescale = None
2545
+ elif rescale is True:
2546
+ obj.rescale = True
2547
+ else:
2548
+ # flatten rescale
2549
+ if hasattr(rescale, 'keys'):
2550
+ obj.rescale = gvar.asbufferdict(rescale)
2551
+ else:
2552
+ obj.rescale = numpy.asarray(rescale)
2553
+ if weighted:
2554
+ obj.weighted = True
2555
+ obj._wlist = []
2556
+ else:
2557
+ obj._msum = 0.
2558
+ obj._covsum = 0.
2559
+ obj._n = 0
2560
+ obj.weighted = False
2561
+ obj.sum_neval = sum_neval
2562
+ return obj
2563
+
2564
+ def _remove_gvars(self, gvlist):
2565
+ tmp = RAvgArray(
2566
+ weighted=self.weighted,
2567
+ itn_results= [numpy.array(x) for x in self.itn_results],
2568
+ sum_neval=self.sum_neval,
2569
+ rescale=self.rescale
2570
+ )
2571
+ tmp.itn_results = gvar.remove_gvars(tmp.itn_results, gvlist)
2572
+ tmp.flat[:] = gvar.remove_gvars(numpy.array(tmp), gvlist)
2573
+ return tmp
2574
+
2575
+ def _distribute_gvars(self, gvlist):
2576
+ return RAvgArray(
2577
+ weighted=self.weighted,
2578
+ itn_results=gvar.distribute_gvars(self.itn_results, gvlist),
2579
+ sum_neval=self.sum_neval,
2580
+ rescale=self.rescale,
2581
+ )
2582
+
2583
+ def __reduce_ex__(self, protocol):
2584
+ save = numpy.array(self.flat[:])
2585
+ self.flat[:] = 0
2586
+ superpickled = super(RAvgArray, self).__reduce__()
2587
+ self.flat[:] = save
2588
+ state = superpickled[2] + (
2589
+ self.weighted, gvar.dumps(self.itn_results, protocol=protocol),
2590
+ (self.sum_neval, self.rescale),
2591
+ )
2592
+ return (superpickled[0], superpickled[1], state)
2593
+
2594
+ def __setstate__(self, state):
2595
+ super(RAvgArray, self).__setstate__(state[:-3])
2596
+ if isinstance(state[-1], tuple):
2597
+ self.sum_neval, self.rescale = state[-1]
2598
+ else:
2599
+ # included for compatibility with previous versions
2600
+ self.sum_neval = state[-1]
2601
+ self.rescale = True
2602
+ itn_results = gvar.loads(state[-2])
2603
+ self.weighted = state[-3]
2604
+ if self.weighted:
2605
+ self._wlist = []
2606
+ self._mlist = []
2607
+ else:
2608
+ self._msum = 0.
2609
+ self._covsum = 0.
2610
+ self._n = 0
2611
+ self.itn_results = []
2612
+ for r in itn_results:
2613
+ self.add(r)
2614
+
2615
+ def __array_finalize__(self, obj):
2616
+ if obj is None:
2617
+ return
2618
+ if obj.weighted:
2619
+ self.weighted = getattr(obj, 'weighted', True)
2620
+ self._wlist = getattr(obj, '_wlist', [])
2621
+ else:
2622
+ self._msum = getattr(obj, '_msum', 0.)
2623
+ self._covsum = getattr(obj, '_covsum', 0.)
2624
+ self._n = getattr(obj, '_n', 0.)
2625
+ self.weighted = getattr(obj, 'weighted', False)
2626
+ self._mlist = getattr(obj, '_mlist', [])
2627
+ self.itn_results = getattr(obj, 'itn_results', [])
2628
+ self.sum_neval = getattr(obj, 'sum_neval', 0)
2629
+ self.rescale = getattr(obj, 'rescale', True)
2630
+
2631
+ def __init__(self, shape=None,
2632
+ dtype=object, buffer=None, offset=0, strides=None, order=None,
2633
+ weighted=True, itn_results=None, sum_neval=0, rescale=True):
2634
+ # needed because array_finalize can't handle self.add(r)
2635
+ self[:] *= 0
2636
+ if itn_results is not None:
2637
+ if isinstance(itn_results, bytes):
2638
+ itn_results = gvar.loads(itn_results)
2639
+ self.itn_results = []
2640
+ for r in itn_results:
2641
+ self.add(r)
2642
+
2643
+ def extend(self, ravg):
2644
+ r""" Merge results from :class:`RAvgArray` object ``ravg`` after results currently in ``self``. """
2645
+ for r in ravg.itn_results:
2646
+ self.add(r)
2647
+ self.sum_neval += ravg.sum_neval
2648
+
2649
+ def _w(self, matrix, rescale=False):
2650
+ " Decompose inverse matrix, with protection against singular matrices. "
2651
+ # extra factor of 1e4 is from trial and error with degenerate integrands (need extra buffer);
2652
+ # also negative svdcut and rescale=False are important for degenerate integrands
2653
+ # (alternative svdcut>0 and rescale=True introduces biases; also rescale=True not needed
2654
+ # since now have self.rescale)
2655
+ s = gvar.SVD(matrix, svdcut=-EPSILON * len(matrix) * 1e4, rescale=rescale)
2656
+ return s.decomp(-1)
2657
+
2658
+ def converged(self, rtol, atol):
2659
+ return numpy.all(
2660
+ gvar.sdev(self) < atol + rtol * numpy.abs(gvar.mean(self))
2661
+ )
2662
+
2663
+ def _chi2(self):
2664
+ if len(self.itn_results) <= 1:
2665
+ return 0.0
2666
+ if self.weighted:
2667
+ ans = 0.0
2668
+ wavg = gvar.mean(self).reshape((-1,))
2669
+ if self.rescale is not None:
2670
+ wavg /= self._rescale
2671
+ for ri, w, m in zip(self.itn_results, self._wlist, self._mlist):
2672
+ for wi in w:
2673
+ ans += wi.dot(m - wavg) ** 2
2674
+ return ans
2675
+ else:
2676
+ if self._invw is None:
2677
+ self._invw = self._w(self._covsum / self._n)
2678
+ wavg = gvar.mean(self).reshape((-1,))
2679
+ ans = 0.0
2680
+ for m in self._mlist:
2681
+ delta = wavg - m
2682
+ for invwi in self._invw:
2683
+ ans += invwi.dot(delta) ** 2
2684
+ return ans
2685
+ chi2 = property(_chi2, None, None, "*chi**2* of weighted average.")
2686
+
2687
+ def _dof(self):
2688
+ if len(self.itn_results) <= 1:
2689
+ return 0
2690
+ if not self.weighted:
2691
+ if self._invw is None:
2692
+ self._invw = self._w(self._covsum / self._n)
2693
+ return (len(self.itn_results) - 1) * len(self._invw)
2694
+ else:
2695
+ return numpy.sum([len(w) for w in self._wlist]) - self.size
2696
+ dof = property(
2697
+ _dof, None, None,
2698
+ "Number of degrees of freedom in weighted average."
2699
+ )
2700
+
2701
+ def _nitn(self):
2702
+ return len(self.itn_results)
2703
+ nitn = property(_nitn, None, None, "Number of iterations.")
2704
+
2705
+ def _Q(self):
2706
+ if self.dof <= 0 or self.chi2 < 0:
2707
+ return float('nan')
2708
+ return gvar.gammaQ(self.dof / 2., self.chi2 / 2.)
2709
+ Q = property(
2710
+ _Q, None, None,
2711
+ "*Q* or *p-value* of weighted average's *chi**2*.",
2712
+ )
2713
+
2714
+ def _avg_neval(self):
2715
+ return self.sum_neval / self.nitn if self.nitn > 0 else 0
2716
+ avg_neval = property(_avg_neval, None, None, "Average number of integrand evaluations per iteration.")
2717
+
2718
+ def add(self, g):
2719
+ r""" Add estimate ``g`` to the running average. """
2720
+ g = numpy.asarray(g)
2721
+ self.itn_results.append(g)
2722
+ if g.size > 1 and isinstance(g.flat[0], gvar.GVarRef):
2723
+ return
2724
+ g = g.reshape((-1,))
2725
+ if self.weighted:
2726
+ if not hasattr(self, '_rescale'):
2727
+ if self.rescale is not None:
2728
+ self._rescale = numpy.fabs(gvar.mean(g if self.rescale is True else self.rescale.flat[:]))
2729
+ gsdev = gvar.sdev(g)
2730
+ idx = gsdev > self._rescale
2731
+ self._rescale[idx] = gsdev[idx]
2732
+ self._rescale[self._rescale <= 0] = 1.
2733
+ else:
2734
+ self._rescale = 1.
2735
+ g = g / self._rescale
2736
+ gmean = gvar.mean(g)
2737
+ gcov = gvar.evalcov(g)
2738
+ for i in range(len(gcov)):
2739
+ if gcov[i,i] <= 0:
2740
+ gcov[i,i] = TINY
2741
+ self._mlist.append(gmean)
2742
+ self._wlist.append(self._w(gcov))
2743
+ invcov = numpy.sum([(w.T).dot(w) for w in self._wlist], axis=0)
2744
+ invw = self._w(invcov)
2745
+ cov = (invw.T).dot(invw)
2746
+ mean = 0.0
2747
+ for m, w in zip(self._mlist, self._wlist):
2748
+ for wj in w:
2749
+ wj_m = wj.dot(m)
2750
+ for invwi in invw:
2751
+ mean += invwi * invwi.dot(wj) * wj_m
2752
+ self[:] = (gvar.gvar(mean, cov) * self._rescale).reshape(self.shape)
2753
+ else:
2754
+ gmean = gvar.mean(g)
2755
+ gcov = gvar.evalcov(g)
2756
+ # idx = (gcov[numpy.diag_indices_from(gcov)] <= 0.0)
2757
+ # gcov[numpy.diag_indices_from(gcov)][idx] = TINY
2758
+ self._mlist.append(gmean)
2759
+ self._msum += gmean
2760
+ self._covsum += gcov
2761
+ self._invw = None
2762
+ self._n += 1
2763
+ mean = self._msum / self._n
2764
+ cov = self._covsum / (self._n ** 2)
2765
+ self[:] = gvar.gvar(mean, cov).reshape(self.shape)
2766
+
2767
+ def summary(self, extended=False, weighted=None, rescale=None):
2768
+ r""" Assemble summary of results, iteration-by-iteration, into a string.
2769
+
2770
+ Args:
2771
+ extended (bool): Include a table of final averages for every
2772
+ component of the integrand if ``True``. Default is ``False``.
2773
+ weighted (bool): Display weighted averages of results from different
2774
+ iterations if ``True``; otherwise show unweighted averages.
2775
+ Default behavior is determined by |vegas|.
2776
+ """
2777
+ if weighted is None:
2778
+ weighted = self.weighted
2779
+ if rescale is None:
2780
+ rescale = self.rescale
2781
+ acc = RAvgArray(self.shape, weighted=weighted, rescale=rescale)
2782
+
2783
+ linedata = []
2784
+ for i, res in enumerate(self.itn_results):
2785
+ acc.add(res)
2786
+ if i > 0:
2787
+ chi2_dof = acc.chi2 / acc.dof
2788
+ Q = acc.Q
2789
+ else:
2790
+ chi2_dof = 0.0
2791
+ Q = 1.0
2792
+ itn = '%3d' % (i + 1)
2793
+ integral = '%-15s' % res.flat[0]
2794
+ wgtavg = '%-15s' % acc.flat[0]
2795
+ chi2dof = '%8.2f' % (acc.chi2 / acc.dof if i != 0 else 0.0)
2796
+ Q = '%8.2f' % (acc.Q if i != 0 else 1.0)
2797
+ linedata.append((itn, integral, wgtavg, chi2dof, Q))
2798
+ nchar = 5 * [0]
2799
+ for data in linedata:
2800
+ for i, d in enumerate(data):
2801
+ if len(d) > nchar[i]:
2802
+ nchar[i] = len(d)
2803
+ fmt = '%%%ds %%-%ds %%-%ds %%%ds %%%ds\n' % tuple(nchar)
2804
+ if weighted:
2805
+ ans = fmt % ('itn', 'integral', 'wgt average', 'chi2/dof', 'Q')
2806
+ else:
2807
+ ans = fmt % ('itn', 'integral', 'average', 'chi2/dof', 'Q')
2808
+ ans += len(ans[:-1]) * '-' + '\n'
2809
+ for data in linedata:
2810
+ ans += fmt % data
2811
+ if extended and self.itn_results[0].size > 1:
2812
+ ans += '\n' + gvar.tabulate(self) + '\n'
2813
+ return ans
2814
+
2815
+ ################
2816
+ # Classes that standardize the interface for integrands. Internally vegas
2817
+ # assumes batch integrands that take an array x[i,d] as argument and
2818
+ # returns an array fx[i, d] where i = batch index and d = index over
2819
+ # dimensions or integrand components. VegasIntegrand figures out how
2820
+ # to convert the various types of integrand to this format. Integrands that
2821
+ # return scalars or arrays or dictionaries lead to integration results that are
2822
+ # scalars or arrays or dictionaries, respectively; VegasResult figures
2823
+ # out how to convert the 1-d array used internally in vegas into the
2824
+ # appropriate structure given the integrand structure.
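+ # For example (illustrative), a dict-valued integrand returning
+ # dict(a=..., b=[..., ...]) is flattened internally to fx[i, c] with
+ # c=0 for 'a' and c=1,2 for the two components of 'b'.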
2825
+
2826
+ cdef class VegasResult:
2827
+ cdef readonly object integrand
2828
+ cdef readonly object shape
2829
+ cdef readonly object result
2830
+ cdef readonly double sum_neval
2831
+ """ Accumulated result object --- standard interface for integration results.
2832
+
2833
+ Integrands are flattened into 2-d arrays in |vegas|. This object
2834
+ accumulates integration results from multiple iterations of |vegas|
2835
+ and can convert them to the original integrand format. It also counts
2836
+ the number of integrand evaluations used in all iterations and adds it to the
2837
+ result (``sum_neval``).
2838
+
2839
+ Args:
2840
+ integrand: :class:`VegasIntegrand` object.
2841
+ weighted (bool): use weighted average across iterations?
2842
+
2843
+ Attributes:
2844
+ shape: shape of integrand result or ``None`` if dictionary.
2845
+ result: accumulation of integral results. This is an object
2846
+ of type :class:`vegas.RAvgArray` for array-valued integrands,
2847
+ :class:`vegas.RAvgDict` for dictionary-valued integrands, and
2848
+ :class:`vegas.RAvg` for scalar-valued integrands.
2849
+ sum_neval: total number of integrand evaluations in all iterations.
2850
+ avg_neval: average number of integrand evaluations per iteration.
2851
+ """
2852
+ def __init__(self, integrand=None, weighted=None):
2853
+ self.integrand = integrand
2854
+ self.shape = integrand.shape
2855
+ self.sum_neval = 0
2856
+ if self.shape is None:
2857
+ self.result = RAvgDict(integrand.bdict, weighted=weighted)
2858
+ elif self.shape == ():
2859
+ self.result = RAvg(weighted=weighted)
2860
+ else:
2861
+ self.result = RAvgArray(self.shape, weighted=weighted)
2862
+
2863
+ def save(self, outfile):
2864
+ " pickle current results in ``outfile`` for later use. "
2865
+ if isinstance(outfile, str) or sys.version_info.major == 2:
2866
+ with open(outfile, 'wb') as ofile:
2867
+ pickle.dump(self.result, ofile)
2868
+ else:
2869
+ pickle.dump(self.result, outfile)
2870
+
2871
+ def saveall(self, integrator, outfile):
2872
+ " pickle current (results,integrator) in ``outfile`` for later use. "
2873
+ if isinstance(outfile, str) or sys.version_info.major == 2:
2874
+ with open(outfile, 'wb') as ofile:
2875
+ pickle.dump((self.result, integrator), ofile)
2876
+ else:
2877
+ pickle.dump((self.result, integrator), outfile)
2878
+
2879
+ def update(self, mean, var, last_neval=None):
2880
+ self.result.add(self.integrand.format_result(mean, var))
2881
+ if last_neval is not None:
2882
+ self.sum_neval += last_neval
2883
+ self.result.sum_neval = self.sum_neval
2884
+
2885
+ def update_analyzer(self, analyzer):
2886
+ r""" Update analyzer at end of an iteration. """
2887
+ analyzer.end(self.result.itn_results[-1], self.result)
2888
+
2889
+ def converged(self, rtol, atol):
2890
+ " Convergence test. "
2891
+ return self.result.converged(rtol, atol)
2892
+
2893
+ cdef class VegasIntegrand:
2894
+ cdef public object shape
2895
+ cdef public object fcntype
2896
+ cdef public Py_ssize_t size
2897
+ cdef public object eval
2898
+ cdef public object bdict
2899
+ cdef public int mpi_nproc # number of MPI processors
2900
+ cdef public int rank
2901
+ cdef public object comm
2902
+ """ Integand object --- standard interface for integrands
2903
+
2904
+ This class provides a standard interface for all |vegas| integrands.
2905
+ It analyzes the integrand to determine the shape of its output.
2906
+
2907
+ All integrands are converted to lbatch integrands. Method ``eval(x)``
2908
+ takes argument ``x[i,d]`` and returns ``fx[i,c]`` where ``i``
2909
+ is the batch index, ``d`` indexes different directions in x-space,
2910
+ and ``c`` indexes the different components of the integrand.
2911
+
2912
+ The integrands are configured for parallel processing
2913
+ using MPI (via :mod:`mpi4py`) if ``mpi=True``.
2914
+
2915
+ Args:
2916
+ fcn: Integrand function.
2917
+ map: Integrator's :class:`AdaptiveMap`.
2918
+ uses_jac: Determines whether or not function call receives the Jacobian.
2919
+ xsample: Random point from x-space (properly formatted as dict or array).
2920
+ mpi: ``True`` if mpi might be used; ``False`` (default) otherwise.
2921
+
2922
+ Attributes:
2923
+ eval: ``eval(x)`` returns ``fcn(x)`` repacked as a 2-d array.
2924
+ shape: Shape of integrand ``fcn(x)`` or ``None`` if it is a dictionary.
2925
+ size: Size of integrand.
2926
+ mpi_nproc: Number of MPI processors (=1 if no MPI)
2927
+ rank: MPI rank of this processor (=0 if no MPI)
2928
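+
+ For example (a hypothetical dictionary-valued integrand), the output of
+ ``fcn`` is flattened in :class:`gvar.BufferDict` order, so ::
+
+ def fcn(x): # x[d], a 1-d point
+ return dict(a=x[0] ** 2, b=[x[0] * x[1], x[1] ** 2])
+
+ evaluated through ``eval`` at a batch ``x[i, d]`` of ``n`` points
+ returns ``fx[i, c]`` with shape ``(n, 3)``: column 0 holds ``a`` and
+ columns 1-2 hold ``b[0]``, ``b[1]``.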
+ """
2929
+ def __init__(self, fcn, map, uses_jac, xsample, mpi):
2930
+ if isinstance(fcn, type(LBatchIntegrand)) or isinstance(fcn, type(RBatchIntegrand)):
2931
+ raise ValueError(
2932
+ 'integrand given is a class, not an object -- need to initialize?'
2933
+ )
2934
+ if mpi:
2935
+ try:
2936
+ import mpi4py.MPI
2937
+ self.comm = mpi4py.MPI.COMM_WORLD
2938
+ self.rank = self.comm.Get_rank()
2939
+ self.mpi_nproc = self.comm.Get_size()
2940
+ except ImportError:
2941
+ self.mpi_nproc = 1
2942
+ else:
2943
+ self.mpi_nproc = 1
2944
+
2945
+ # configure using sample evaluation fcn(x) to
2946
+ # determine integrand shape
2947
+
2948
+ # sample x, jac
2949
+ xsample = gvar.mean(xsample)
2950
+ x0 = xsample
2951
+ if uses_jac:
2952
+ if xsample.shape is None:
2953
+ jac0 = gvar.BufferDict(xsample, buf=xsample.size * [1])
2954
+ else:
2955
+ jac0 = numpy.ones(xsample.shape, dtype=float)
2956
+ else:
2957
+ jac0 = None
2958
+
2959
+ # configure self.eval
2960
+ self.fcntype = getattr(fcn, 'fcntype', 'scalar')
2961
+ if self.fcntype == 'scalar':
2962
+ fx = fcn(x0, jac=jac0) if uses_jac else fcn(x0)
2963
+ if hasattr(fx, 'keys'):
2964
+ if not isinstance(fx, gvar.BufferDict):
2965
+ fx = gvar.BufferDict(fx)
2966
+ self.size = fx.size
2967
+ self.shape = None
2968
+ self.bdict = fx
2969
+ _eval = _BatchIntegrand_from_NonBatchDict(fcn, self.size, xsample)
2970
+ else:
2971
+ fx = numpy.asarray(fx)
2972
+ self.shape = fx.shape
2973
+ self.size = fx.size
2974
+ _eval = _BatchIntegrand_from_NonBatch(fcn, self.size, self.shape, xsample)
2975
+ elif self.fcntype == 'rbatch':
2976
+ if x0.shape is None:
2977
+ x0 = gvar.BufferDict(x0, rbatch_buf=x0.buf.reshape(x0.buf.shape + (1,)))
2978
+ if uses_jac:
2979
+ jac0 = gvar.BufferDict(jac0, rbatch_buf=jac0.buf.reshape(jac0.buf.shape + (1,)))
2980
+ else:
2981
+ x0 = x0.reshape(x0.shape + (1,))
2982
+ if uses_jac:
2983
+ jac0 = jac0.reshape(jac0.shape + (1,))
2984
+ fx = fcn(x0, jac=jac0) if uses_jac else fcn(x0)
2985
+ if hasattr(fx, 'keys'):
2986
+ # build dictionary for non-batch version of function
2987
+ fxs = gvar.BufferDict()
2988
+ for k in fx:
2989
+ fxs[k] = numpy.asarray(fx[k])[..., 0]
2990
+ self.shape = None
2991
+ self.bdict = fxs
2992
+ self.size = self.bdict.size
2993
+ _eval = _BatchIntegrand_from_BatchDict(fcn, self.bdict, rbatch=True, xsample=xsample)
2994
+ else:
2995
+ self.shape = numpy.shape(fx)[:-1]
2996
+ self.size = numpy.prod(self.shape, dtype=type(self.size))
2997
+ _eval = _BatchIntegrand_from_Batch(fcn, rbatch=True, xsample=xsample)
2998
+ else:
2999
+ if x0.shape is None:
3000
+ x0 = gvar.BufferDict(x0, lbatch_buf=x0.buf.reshape((1,) + x0.buf.shape ))
3001
+ if uses_jac:
3002
+ jac0 = gvar.BufferDict(jac0, lbatch_buf=jac0.buf.reshape((1,) + jac0.buf.shape))
3003
+ else:
3004
+ x0 = x0.reshape((1,) + x0.shape)
3005
+ if uses_jac:
3006
+ jac0 = jac0.reshape((1,) + jac0.shape )
3007
+ fx = fcn(x0) if jac0 is None else fcn(x0, jac=jac0)
3008
+ if hasattr(fx, 'keys'):
3009
+ # build dictionary for non-batch version of function
3010
+ fxs = gvar.BufferDict()
3011
+ for k in fx:
3012
+ fxs[k] = fx[k][0]
3013
+ self.shape = None
3014
+ self.bdict = fxs
3015
+ self.size = self.bdict.size
3016
+ _eval = _BatchIntegrand_from_BatchDict(fcn, self.bdict, rbatch=False, xsample=xsample)
3017
+ else:
3018
+ fx = numpy.asarray(fx)
3019
+ self.shape = fx.shape[1:]
3020
+ self.size = numpy.prod(self.shape, dtype=type(self.size))
3021
+ _eval = _BatchIntegrand_from_Batch(fcn, rbatch=False, xsample=xsample)
3022
+ if self.mpi_nproc > 1:
3023
+ # MPI multiprocessor mode
3024
+ def _mpi_eval(x, jac, self=self, _eval=_eval):
3025
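+ # each rank evaluates an equal-sized slice of the batch; the
+ # slice size is padded (nx * mpi_nproc >= x.shape[0]) because
+ # Allgather requires equal buffer sizes on every rank -- padding
+ # rows are never filled and are dropped in the final slice below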
+ nx = x.shape[0] // self.mpi_nproc + 1
3026
+ i0 = self.rank * nx
3027
+ i1 = min(i0 + nx, x.shape[0])
3028
+ f = numpy.empty((nx, self.size), float)
3029
+ if i1 > i0:
3030
+ # fill f so long as haven't gone off end
3031
+ if jac is None:
3032
+ f[:(i1-i0)] = _eval(x[i0:i1], jac=None)
3033
+ else:
3034
+ f[:(i1-i0)] = _eval(x[i0:i1], jac=jac[i0:i1])
3035
+ results = numpy.empty((self.mpi_nproc * nx, self.size), float)
3036
+ self.comm.Allgather(f, results)
3037
+ return results[:x.shape[0]]
3038
+ self.eval = _mpi_eval
3039
+ else:
3040
+ self.eval = _eval
3041
+
3042
+ def _remove_gvars(self, gvlist):
3043
+ tmp = copy.copy(self)
3044
+ tmp.eval = gvar.remove_gvars(tmp.eval, gvlist)
3045
+ return tmp
3046
+
3047
+ def _distribute_gvars(self, gvlist):
3048
+ self.eval = gvar.distribute_gvars(self.eval, gvlist)
3049
+
3050
+ def __call__(self, x, jac=None):
3051
+ r""" Non-batch version of fcn """
3052
+ # repack x as lbatch array and evaluate function via eval
3053
+ if hasattr(x, 'keys'):
3054
+ x = gvar.asbufferdict(x)
3055
+ x = x.buf.reshape(1, -1)
3056
+ else:
3057
+ x = numpy.asarray(x).reshape(1, -1)
3058
+ fx = self.eval(x, jac=jac)
3059
+ return self.format_result(fx)
3060
+
3061
+ def format_result(self, mean, var=None):
3062
+ r""" Reformat output from integrator to correspond to original output format """
3063
+ if var is None:
3064
+ # mean is an ndarray
3065
+ if self.shape is None:
3066
+ return gvar.BufferDict(self.bdict, buf=mean.reshape(-1))
3067
+ elif self.shape == ():
3068
+ return mean.flat[0]
3069
+ else:
3070
+ return mean.reshape(self.shape)
3071
+ else:
3072
+ # from Integrator.call
3073
+ if self.shape is None:
3074
+ return gvar.BufferDict(self.bdict, buf=gvar.gvar(mean, var).reshape(-1))
3075
+ elif self.shape == ():
3076
+ return gvar.gvar(mean[0], var[0,0] ** 0.5)
3077
+ else:
3078
+ return gvar.gvar(mean, var).reshape(self.shape)
3079
+
3080
+ def format_evalx(self, evalx):
3081
+ r""" Reformat output from eval(x).
3082
+
3083
+ ``self.eval(x)`` returns an array ``evalx[i,d]`` where ``i`` is the batch index and ``d``
3084
+ labels different components of the ``self.fcn`` output. ``self.format_evalx(evalx)``
3085
+ reformats that output into a dictionary or array corresponding to the original output
3086
+ from ``self.fcn``.
3087
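+
+ For example (hypothetical shapes), an array-valued integrand that
+ returns ``(2, 2)`` arrays gives, for a batch of 100 points::
+
+ evalx.shape # (100, 4)
+ format_evalx(evalx).shape # (100, 2, 2)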
+ """
3088
+ if self.shape is None:
3089
+ return gvar.BufferDict(self.bdict, lbatch_buf=evalx)
3090
+ else:
3091
+ return evalx.reshape(evalx.shape[:1] + self.shape)
3092
+
3093
+ def training(self, x, jac):
3094
+ r""" Calculate first element of integrand at point ``x``. """
3095
+ fx = self.eval(x, jac=jac)
3096
+ if fx.ndim == 1:
3097
+ return fx
3098
+ else:
3099
+ fx = fx.reshape((x.shape[0], -1))
3100
+ return fx[:, 0]
3101
+
3102
+ # The _BatchIntegrand_from_XXXX objects are used by VegasIntegrand
3103
+ # to convert different types of integrand (ie, scalar vs array vs dict,
3104
+ # and nonbatch vs batch) to the standard output format assumed internally
3105
+ # in vegas.
3106
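+ # For example (hypothetical), wrapping a scalar integrand f(x) with
+ # _BatchIntegrand_from_NonBatch below yields a callable that takes a
+ # batch x[i, d] and returns an array fx[i, c] with a single column;
+ # the Dict/Batch variants do the analogous repacking for their types.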
+ cdef class _BatchIntegrand_from_Base(object):
3107
+ cdef readonly object xsample
3108
+ cdef readonly bint dict_arg
3109
+ cdef readonly bint std_arg
3110
+ cdef public object fcn
3111
+ """ Base class for following classes -- manages xsample """
3112
+
3113
+ def __init__(self, fcn, xsample):
3114
+ self.fcn = fcn
3115
+ self.xsample = xsample
3116
+ if xsample.shape is None:
3117
+ self.dict_arg = True
3118
+ self.std_arg = False
3119
+ else:
3120
+ self.dict_arg = False
3121
+ self.std_arg = (len(xsample.shape) == 1)
3122
+
3123
+ def _remove_gvars(self, gvlist):
3124
+ tmp = copy.copy(self)
3125
+ tmp.fcn = gvar.remove_gvars(tmp.fcn, gvlist)
3126
+ return tmp
3127
+
3128
+ def _distribute_gvars(self, gvlist):
3129
+ self.fcn = gvar.distribute_gvars(self.fcn, gvlist)
3130
+
3131
+ def non_std_arg_fcn(self, x, jac=None):
3132
+ " fcn(x) for non-standard non-batch functions "
3133
+ x = numpy.asarray(x)
3134
+ if self.dict_arg:
3135
+ xd = gvar.BufferDict(self.xsample, buf=x)
3136
+ if jac is not None:
3137
+ jacd = gvar.BufferDict(self.xsample, buf=jac)
3138
+ return self.fcn(xd, jacd)
3139
+ else:
3140
+ return self.fcn(xd)
3141
+ elif jac is None:
3142
+ return self.fcn(x.reshape(self.xsample.shape))
3143
+ else:
3144
+ return self.fcn(x.reshape(self.xsample.shape), jac=numpy.asarray(jac).reshape(self.xsample.shape))
3145
+
3146
+ def non_std_arg_batch_fcn(self, x, jac=None):
3147
+ " fcn(x) for non-standard batch functions "
3148
+ x = numpy.asarray(x)
3149
+ if self.dict_arg:
3150
+ if self.rbatch:
3151
+ xd = gvar.BufferDict(self.xsample, rbatch_buf=x.T)
3152
+ if jac is not None:
3153
+ jac = gvar.BufferDict(self.xsample, rbatch_buf=jac.T)
3154
+ else:
3155
+ xd = gvar.BufferDict(self.xsample, lbatch_buf=x)
3156
+ if jac is not None:
3157
+ jac = gvar.BufferDict(self.xsample, lbatch_buf=jac)
3158
+ return self.fcn(xd) if jac is None else self.fcn(xd, jac=jac)
3159
+ else:
3160
+ if self.rbatch:
3161
+ sh = self.xsample.shape + (-1,)
3162
+ return self.fcn(x.T.reshape(sh)) if jac is None else self.fcn(x.T.reshape(sh), jac=jac.T.reshape(sh))
3163
+ else:
3164
+ sh = (-1,) + self.xsample.shape
3165
+ return self.fcn(x.reshape(sh)) if jac is None else self.fcn(x.reshape(sh), jac=jac.reshape(sh))
3166
+
3167
+ cdef class _BatchIntegrand_from_NonBatch(_BatchIntegrand_from_Base):
3168
+ cdef readonly Py_ssize_t size
3169
+ cdef readonly object shape
3170
+ """ Batch integrand from non-batch integrand. """
3171
+
3172
+ def __init__(self, fcn, size, shape, xsample):
3173
+ self.size = size
3174
+ self.shape = shape
3175
+ super(_BatchIntegrand_from_NonBatch, self).__init__(fcn, xsample)
3176
+
3177
+ def __call__(self, double[:, :] x, jac=None):
3178
+ cdef Py_ssize_t i, j
3179
+ cdef double[:, ::1] f
3180
+ cdef const double[:] fx
3181
+ _f = numpy.empty(
3182
+ (x.shape[0], self.size), float
3183
+ )
3184
+ f = _f
3185
+ if self.shape == ():
3186
+ # very common special case
3187
+ for i in range(x.shape[0]):
3188
+ if self.std_arg:
3189
+ f[i, 0] = self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
3190
+ else:
3191
+ f[i, 0] = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
3192
+ else:
3193
+ for i in range(x.shape[0]):
3194
+ if self.std_arg:
3195
+ fx = numpy.asarray(
3196
+ self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
3197
+ ).reshape((-1,))
3198
+ else:
3199
+ fx = numpy.asarray(
3200
+ self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
3201
+ ).reshape((-1,))
3202
+ for j in range(len(fx)):
3203
+ f[i, j] = fx[j]
3204
+ return _f
3205
+
3206
+ cdef class _BatchIntegrand_from_NonBatchDict(_BatchIntegrand_from_Base):
3207
+ cdef readonly Py_ssize_t size
3208
+ """ Batch integrand from non-batch dict-integrand. """
3209
+
3210
+ def __init__(self, fcn, size, xsample=None):
3211
+ self.size = size
3212
+ super(_BatchIntegrand_from_NonBatchDict, self).__init__(fcn, xsample)
3213
+
3214
+ def __call__(self, double[:, :] x, jac=None):
3215
+ cdef Py_ssize_t i, j
3216
+ cdef double[:, ::1] f
3217
+ _f = numpy.empty(
3218
+ (x.shape[0], self.size), float
3219
+ )
3220
+ f = _f
3221
+ for i in range(x.shape[0]):
3222
+ if self.std_arg:
3223
+ fx = self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
3224
+ else:
3225
+ fx = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
3226
+ if not isinstance(fx, gvar.BufferDict):
3227
+ fx = gvar.BufferDict(fx)
3228
+ for j in range(f.shape[1]):
3229
+ f[i, j] = fx.buf[j]
3230
+ return _f
3231
+
3232
+ cdef class _BatchIntegrand_from_Batch(_BatchIntegrand_from_Base):
3233
+ cdef readonly object shape
3234
+ cdef readonly bint rbatch
3235
+ """ batch integrand from batch function. """
3236
+
3237
+ def __init__(self, fcn, rbatch=False, xsample=None):
3238
+ self.rbatch = rbatch
3239
+ super(_BatchIntegrand_from_Batch, self).__init__(fcn, xsample)
3240
+
3241
+ def __call__(self, x, jac=None):
3242
+ # call fcn(x)
3243
+ if self.std_arg:
3244
+ if self.rbatch:
3245
+ fx = self.fcn(x.T) if jac is None else self.fcn(x.T, jac=jac.T)
3246
+ else:
3247
+ fx = self.fcn(x) if jac is None else self.fcn(x, jac=jac)
3248
+ else:
3249
+ fx = self.non_std_arg_batch_fcn(x, jac)
3250
+
3251
+ # process result
3252
+ if self.rbatch:
3253
+ # fx = self.fcn(x.T) if jac is None else self.fcn(x, jac=jac.T)
3254
+ if not isinstance(fx, numpy.ndarray):
3255
+ fx = numpy.asarray(fx)
3256
+ fx = fx.reshape((-1, x.shape[0]))
3257
+ return numpy.ascontiguousarray(fx.T)
3258
+ else:
3259
+ # fx = self.fcn(x) if jac is None else self.fcn(x, jac=jac)
3260
+ if not isinstance(fx, numpy.ndarray):
3261
+ fx = numpy.asarray(fx)
3262
+ return fx.reshape((x.shape[0], -1))
3263
+
3264
+
3265
+ cdef class _BatchIntegrand_from_BatchDict(_BatchIntegrand_from_Base):
3266
+ cdef readonly Py_ssize_t size
3267
+ cdef readonly object slice
3268
+ cdef readonly object shape
3269
+ cdef readonly bint rbatch
3270
+ """ batch integrand from batch dict-integrand. """
3271
+
3272
+ def __init__(self, fcn, bdict, rbatch=False, xsample=None):
3273
+ self.size = bdict.size
3274
+ self.rbatch = rbatch
3275
+ self.slice = collections.OrderedDict()
3276
+ self.shape = collections.OrderedDict()
3277
+ for k in bdict:
3278
+ self.slice[k], self.shape[k] = bdict.slice_shape(k)
3279
+ super(_BatchIntegrand_from_BatchDict, self).__init__(fcn, xsample)
3280
+
3281
+ def __call__(self, x, jac=None):
3282
+ cdef Py_ssize_t i
3283
+ # cdef double[:, ::1] buf
3284
+ buf = numpy.empty(
3285
+ (x.shape[0], self.size), float
3286
+ )
3287
+ # buf = _buf
3288
+ # call fcn(x)
3289
+ if self.std_arg:
3290
+ if self.rbatch:
3291
+ fx = self.fcn(x.T) if jac is None else self.fcn(x.T, jac=jac.T)
3292
+ else:
3293
+ fx = self.fcn(x) if jac is None else self.fcn(x, jac=jac)
3294
+ else:
3295
+ fx = self.non_std_arg_batch_fcn(x, jac)
3296
+
3297
+ # process result
3298
+ if self.rbatch:
3299
+ # fx = self.fcn(x.T) if jac is None else self.fcn(x.T, jac=jac.T)
3300
+ for k in self.slice:
3301
+ buf[:, self.slice[k]] = (
3302
+ fx[k]
3303
+ if self.shape[k] == () else
3304
+ numpy.reshape(fx[k], (-1, x.shape[0])).T
3305
+ )
3306
+ else:
3307
+ # fx = self.fcn(x) if jac is None else self.fcn(x, jac=jac)
3308
+ for k in self.slice:
3309
+ buf[:, self.slice[k]] = (
3310
+ fx[k]
3311
+ if self.shape[k] == () else
3312
+ numpy.asarray(fx[k]).reshape((x.shape[0], -1))
3313
+ )
3314
+ return buf
3315
+
3316
+ # LBatchIntegrand and RBatchIntegrand are container classes for batch integrands.
3317
+ cdef class LBatchIntegrand(object):
3318
+ r""" Wrapper for lbatch integrands.
3319
+
3320
+ Used by :func:`vegas.lbatchintegrand`.
3321
+
3322
+ :class:`vegas.LBatchIntegrand` is the same as
3323
+ :class:`vegas.BatchIntegrand`.
3324
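+
+ For example (a hypothetical class-based integrand, mirroring the
+ decorator example in :func:`vegas.lbatchintegrand` below)::
+
+ import numpy as np
+ import vegas
+
+ class f(vegas.LBatchIntegrand):
+ def __call__(self, x): # x[i, d]
+ return np.exp(-x[:, 0] - x[:, 1]) # returns f[i]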
+ """
3325
+ # cdef public object fcn
3326
+ def __init__(self, fcn=None):
3327
+ self.fcn = self if fcn is None else fcn
3328
+
3329
+ property fcntype:
3330
+ def __get__(self):
3331
+ return 'lbatch'
3332
+
3333
+ def __call__(self, *args, **kargs):
3334
+ return self.fcn(*args, **kargs)
3335
+
3336
+ def __getattr__(self, attr):
3337
+ return getattr(self.fcn, attr)
3338
+
3339
+ def lbatchintegrand(f):
3340
+ r""" Decorator for batch integrand functions.
3341
+
3342
+ Applying :func:`vegas.lbatchintegrand` to a function ``fcn`` repackages
3343
+ the function in a format that |vegas| can understand. Appropriate
3344
+ functions take a :mod:`numpy` array of integration points ``x[i, d]``
3345
+ as an argument, where ``i=0...`` labels the integration point and
3346
+ ``d=0...`` labels direction, and return an array ``f[i]`` of
3347
+ integrand values (or arrays of integrand values) for the corresponding
3348
+ points. The meaning of ``fcn(x)`` is unchanged by the decorator.
3349
+
3350
+ An example is ::
3351
+
3352
+ import vegas
3353
+ import numpy as np
3354
+
3355
+ @vegas.lbatchintegrand # or @vegas.batchintegrand
3356
+ def f(x):
3357
+ return np.exp(-x[:, 0] - x[:, 1])
3358
+
3359
+ for the two-dimensional integrand :math:`\exp(-x_0 - x_1)`.
3360
+ When integrands have dictionary arguments ``xd``, each element of the
3361
+ dictionary has an extra index (on the left): ``xd[key][:, ...]``.
3362
+
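+ For example (hypothetical keys)::
+
+ @vegas.lbatchintegrand
+ def f(xd):
+ # xd['r'] and xd['theta'] each carry a leading batch index
+ return xd['r'] ** 2 * np.cos(xd['theta'])
+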
3363
+ :func:`vegas.batchintegrand` is the same as :func:`vegas.lbatchintegrand`.
3364
+ """
3365
+ try:
3366
+ f.fcntype = 'lbatch'
3367
+ return f
3368
+ except Exception:
3369
+ return LBatchIntegrand(f)
3370
+
3371
+ cdef class RBatchIntegrand(object):
3372
+ r""" Same as :class:`vegas.LBatchIntegrand` but with batch indices on the right (not left). """
3373
+ # cdef public object fcn
3374
+ def __init__(self, fcn=None):
3375
+ self.fcn = self if fcn is None else fcn
3376
+
3377
+ property fcntype:
3378
+ def __get__(self):
3379
+ return 'rbatch'
3380
+
3381
+ def __call__(self, *args, **kargs):
3382
+ return self.fcn(*args, **kargs)
3383
+
3384
+ def __getattr__(self, attr):
3385
+ return getattr(self.fcn, attr)
3386
+
3387
+
3388
+ def rbatchintegrand(f):
3389
+ r""" Same as :func:`vegas.lbatchintegrand` but with batch indices on the right (not left). """
3390
+ try:
3391
+ f.fcntype = 'rbatch'
3392
+ return f
3393
+ except Exception:
3394
+ return RBatchIntegrand(f)
3395
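+
+ # For example (hypothetical), an rbatch version of the lbatch example
+ # above:
+ #
+ # @vegas.rbatchintegrand
+ # def f(x): # x[d, i]: batch index on the right
+ # return np.exp(-x[0] - x[1]) # returns f[i]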
+
3396
+ # legacy names
3397
+ batchintegrand = lbatchintegrand
3398
+ BatchIntegrand = LBatchIntegrand
3399
+
3400
+ vecintegrand = batchintegrand
3401
+ MPIintegrand = batchintegrand
3402
+
3403
+ class VecIntegrand(LBatchIntegrand):
3404
+ pass