vegas-6.2-cp310-cp310-macosx_11_0_arm64.whl → vegas-6.3-cp310-cp310-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

vegas/_vegas.pyx CHANGED
@@ -2,7 +2,7 @@
  # c#ython: profile=True
 
  # Created by G. Peter Lepage (Cornell University) in 12/2013.
- # Copyright (c) 2013-24 G. Peter Lepage.
+ # Copyright (c) 2013-25 G. Peter Lepage.
  #
  # This program is free software: you can redistribute it and/or modify
  # it under the terms of the GNU General Public License as published by
@@ -14,13 +14,10 @@
  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  # GNU General Public License for more details.
 
- cimport cython
- cimport numpy
- from libc.math cimport floor, log, abs, tanh, erf, exp, sqrt, lgamma
+ from libc.math cimport floor, log, abs, tanh, erf, exp, sqrt
 
  import collections
  import copy
- import functools
  import inspect
  import math
  import multiprocessing
@@ -34,18 +31,13 @@ import warnings
  import numpy
  import gvar
 
- if numpy.version.version >= '2.0':
-     FLOAT_TYPE = numpy.float64
- else:
-     FLOAT_TYPE = numpy.float_
-
  cdef double TINY = 10 ** (sys.float_info.min_10_exp + 50) # smallest and biggest
  cdef double HUGE = 10 ** (sys.float_info.max_10_exp - 50) # with extra headroom
  cdef double EPSILON = sys.float_info.epsilon * 1e4 # roundoff error threshold (see Schubert and Gertz Table 2)
 
  # AdaptiveMap is used by Integrator
  cdef class AdaptiveMap:
- """ Adaptive map ``y->x(y)`` for multidimensional ``y`` and ``x``.
+ r""" Adaptive map ``y->x(y)`` for multidimensional ``y`` and ``x``.
 
  An :class:`AdaptiveMap` defines a multidimensional map ``y -> x(y)``
  from the unit hypercube, with ``0 <= y[d] <= 1``, to an arbitrary
@@ -118,7 +110,7 @@ cdef class AdaptiveMap:
  ``ninc=None``, leaves the grid unchanged.
  """
  def __init__(self, grid, ninc=None):
- cdef numpy.npy_intp i, d, dim
+ cdef Py_ssize_t i, d, dim
  cdef double griddi
  if isinstance(grid, AdaptiveMap):
  self.ninc = numpy.array(grid.ninc)
@@ -149,8 +141,8 @@
  def __get__(self):
  return self.grid.shape[0]
 
- def region(self, numpy.npy_intp d=-1):
- """ x-space region.
+ def region(self, Py_ssize_t d=-1):
+ r""" x-space region.
 
  ``region(d)`` returns a tuple ``(xl,xu)`` specifying the ``x``-space
  interval covered by the map in direction ``d``. A list containing
@@ -163,7 +155,7 @@
 
  def extract_grid(self):
  " Return a list of lists specifying the map's grid. "
- cdef numpy.npy_intp d
+ cdef Py_ssize_t d
  grid = []
  for d in range(self.dim):
  ng = self.ninc[d] + 1
@@ -171,18 +163,18 @@
  return grid
 
  def __reduce__(self):
- """ Capture state for pickling. """
+ r""" Capture state for pickling. """
  return (AdaptiveMap, (self.extract_grid(),))
 
  def settings(self, ngrid=5):
- """ Create string with information about grid nodes.
+ r""" Create string with information about grid nodes.
 
  Creates a string containing the locations of the nodes
  in the map grid for each direction. Parameter
  ``ngrid`` specifies the maximum number of nodes to print
  (spread evenly over the grid).
  """
- cdef numpy.npy_intp d
+ cdef Py_ssize_t d
  ans = []
  if ngrid > 0:
  for d in range(self.dim):
@@ -211,15 +203,15 @@
  return self(y)
 
  def make_uniform(self, ninc=None):
- """ Replace the grid with a uniform grid.
+ r""" Replace the grid with a uniform grid.
 
  The new grid has ``ninc[d]`` (or ``ninc``, if it is a number)
  increments along each direction if ``ninc`` is specified.
  If ``ninc=None`` (default), the new grid has the same number
  of increments in each direction as the old grid.
  """
- cdef numpy.npy_intp i, d
- cdef numpy.npy_intp dim = self.grid.shape[0]
+ cdef Py_ssize_t i, d
+ cdef Py_ssize_t dim = self.grid.shape[0]
  cdef double[:] tmp
  cdef double[:, ::1] new_grid
  if ninc is None:
@@ -235,8 +227,8 @@
  "no of increments < 1 in AdaptiveMap -- %s"
  % str(ninc)
  )
- new_inc = numpy.empty((dim, max(ninc)), FLOAT_TYPE)
- new_grid = numpy.empty((dim, new_inc.shape[1] + 1), FLOAT_TYPE)
+ new_inc = numpy.empty((dim, max(ninc)), float)
+ new_grid = numpy.empty((dim, new_inc.shape[1] + 1), float)
  for d in range(dim):
  tmp = numpy.linspace(self.grid[d, 0], self.grid[d, self.ninc[d]], ninc[d] + 1)
  for i in range(ninc[d] + 1):
@@ -249,7 +241,7 @@
  self.clear()
 
  def __call__(self, y):
- """ Return ``x`` values corresponding to ``y``.
+ r""" Return ``x`` values corresponding to ``y``.
 
  ``y`` can be a single ``dim``-dimensional point, or it
  can be an array ``y[i,j, ..., d]`` of such points (``d=0..dim-1``).
@@ -261,17 +253,17 @@
  if y is None:
  y = gvar.RNG.random(size=self.dim)
  else:
- y = numpy.asarray(y, FLOAT_TYPE)
+ y = numpy.asarray(y, float)
  y_shape = y.shape
  y.shape = -1, y.shape[-1]
  x = 0 * y
- jac = numpy.empty(y.shape[0], FLOAT_TYPE)
+ jac = numpy.empty(y.shape[0], float)
  self.map(y, x, jac)
  x.shape = y_shape
  return x
 
  def jac1d(self, y):
- """ Return the map's Jacobian at ``y`` for each direction.
+ r""" Return the map's Jacobian at ``y`` for each direction.
 
  ``y`` can be a single ``dim``-dimensional point, or it
  can be an array ``y[i,j,...,d]`` of such points (``d=0..dim-1``).
@@ -279,15 +271,15 @@
  (one-dimensional) Jacobian (``dx[d]/dy[d]``) corresponding
  to ``y[i,j,...,d]``.
  """
- cdef numpy.npy_intp dim = self.grid.shape[0]
- cdef numpy.npy_intp i, d, ninc, ny, iy
+ cdef Py_ssize_t dim = self.grid.shape[0]
+ cdef Py_ssize_t i, d, ninc, ny, iy
  cdef double y_ninc, dy_ninc
  cdef double[:,::1] jac
  y = numpy.asarray(y)
  y_shape = y.shape
  y.shape = -1, y.shape[-1]
  ny = y.shape[0]
- jac = numpy.empty(y.shape, FLOAT_TYPE)
+ jac = numpy.empty(y.shape, float)
  for i in range(ny):
  for d in range(dim):
  ninc = self.ninc[d]
@@ -303,7 +295,7 @@
  return ans
 
  def jac(self, y):
- """ Return the map's Jacobian at ``y``.
+ r""" Return the map's Jacobian at ``y``.
 
  ``y`` can be a single ``dim``-dimensional point, or it
  can be an array ``y[i,j,...,d]`` of such points (``d=0..dim-1``).
@@ -320,9 +312,9 @@
  double[:, ::1] y,
  double[:, ::1] x,
  double[::1] jac,
- numpy.npy_intp ny=-1
+ Py_ssize_t ny=-1
  ):
- """ Map y to x, where jac is the Jacobian (``dx/dy``).
+ r""" Map y to x, where jac is the Jacobian (``dx/dy``).
 
  ``y[j, d]`` is an array of ``ny`` ``y``-values for direction ``d``.
  ``x[j, d]`` is filled with the corresponding ``x`` values,
@@ -344,9 +336,9 @@
  and ``j=0...ny-1``. ``ny`` is set to ``y.shape[0]`` if it is
  omitted (or negative).
  """
- cdef numpy.npy_intp ninc
- cdef numpy.npy_intp dim = self.inc.shape[0]
- cdef numpy.npy_intp i, iy, d
+ cdef Py_ssize_t ninc
+ cdef Py_ssize_t dim = self.inc.shape[0]
+ cdef Py_ssize_t i, iy, d
  cdef double y_ninc, dy_ninc, tmp_jac
  if ny < 0:
  ny = y.shape[0]
@@ -372,9 +364,9 @@
  double[:, ::1] x,
  double[:, ::1] y,
  double[::1] jac,
- numpy.npy_intp nx=-1
+ Py_ssize_t nx=-1
  ):
- """ Map x to y, where jac is the Jacobian (``dx/dy``).
+ r""" Map x to y, where jac is the Jacobian (``dx/dy``).
 
  ``y[j, d]`` is an array of ``ny`` ``y``-values for direction ``d``.
  ``x[j, d]`` is filled with the corresponding ``x`` values,
@@ -396,10 +388,10 @@
  and ``j=0...nx-1``. ``nx`` is set to ``x.shape[0]`` if it is
  omitted (or negative).
  """
- cdef numpy.npy_intp ninc
- cdef numpy.npy_intp dim = self.inc.shape[0]
- cdef numpy.npy_intp[:] iy
- cdef numpy.npy_intp i, iyi, d
+ cdef Py_ssize_t ninc
+ cdef Py_ssize_t dim = self.inc.shape[0]
+ cdef Py_ssize_t[:] iy
+ cdef Py_ssize_t i, iyi, d
  cdef double y_ninc, dy_ninc, tmp_jac
  if nx < 0:
  nx = x.shape[0]
@@ -430,9 +422,9 @@
  self,
  double[:, ::1] y,
  double[::1] f,
- numpy.npy_intp ny=-1,
+ Py_ssize_t ny=-1,
  ):
- """ Add training data ``f`` for ``y``-space points ``y``.
+ r""" Add training data ``f`` for ``y``-space points ``y``.
 
  Accumulates training data for later use by ``self.adapt()``.
  Grid increments will be made smaller in regions where
@@ -450,14 +442,14 @@
  and ``j=0...ny-1``. ``ny`` is set to ``y.shape[0]`` if it is
  omitted (or negative).
  """
- cdef numpy.npy_intp ninc
- cdef numpy.npy_intp dim = self.inc.shape[0]
- cdef numpy.npy_intp iy
- cdef numpy.npy_intp i, d
+ cdef Py_ssize_t ninc
+ cdef Py_ssize_t dim = self.inc.shape[0]
+ cdef Py_ssize_t iy
+ cdef Py_ssize_t i, d
  if self.sum_f is None:
  shape = (self.inc.shape[0], self.inc.shape[1])
- self.sum_f = numpy.zeros(shape, FLOAT_TYPE)
- self.n_f = numpy.zeros(shape, FLOAT_TYPE) + TINY
+ self.sum_f = numpy.zeros(shape, float)
+ self.n_f = numpy.zeros(shape, float) + TINY
  if ny < 0:
  ny = y.shape[0]
  elif ny > y.shape[0]:
@@ -473,7 +465,7 @@
 
  # @cython.boundscheck(False)
  def adapt(self, double alpha=0.0, ninc=None):
- """ Adapt grid to accumulated training data.
+ r""" Adapt grid to accumulated training data.
 
  ``self.adapt(...)`` projects the training data onto
  each axis independently and maps it into ``x`` space.
@@ -508,10 +500,10 @@
  cdef double[:, ::1] new_grid
  cdef double[::1] avg_f, tmp_f
  cdef double sum_f, acc_f, f_ninc
- cdef numpy.npy_intp old_ninc
- cdef numpy.npy_intp dim = self.grid.shape[0]
- cdef numpy.npy_intp i, j
- cdef numpy.npy_intp[:] new_ninc
+ cdef Py_ssize_t old_ninc
+ cdef Py_ssize_t dim = self.grid.shape[0]
+ cdef Py_ssize_t i, j
+ cdef Py_ssize_t[:] new_ninc
 
  # initialization
  if ninc is None:
@@ -525,12 +517,12 @@
  if min(new_ninc) < 1:
  raise ValueError('ninc < 1: ' + str(list(new_ninc)))
  if max(new_ninc) == 1:
- new_grid = numpy.empty((dim, 2), FLOAT_TYPE)
+ new_grid = numpy.empty((dim, 2), float)
  for d in range(dim):
  new_grid[d, 0] = self.grid[d, 0]
  new_grid[d, 1] = self.grid[d, self.ninc[d]]
  self.grid = numpy.asarray(new_grid)
- self.inc = numpy.empty((dim, 1), FLOAT_TYPE)
+ self.inc = numpy.empty((dim, 1), float)
  self.ninc = numpy.array(dim * [1], dtype=numpy.intp)
  for d in range(dim):
  self.inc[d, 0] = self.grid[d, 1] - self.grid[d, 0]
@@ -538,10 +530,10 @@
  return
 
  # smooth and regrid
- new_grid = numpy.empty((dim, max(new_ninc) + 1), FLOAT_TYPE)
- avg_f = numpy.ones(self.inc.shape[1], FLOAT_TYPE) # default = uniform
+ new_grid = numpy.empty((dim, max(new_ninc) + 1), float)
+ avg_f = numpy.ones(self.inc.shape[1], float) # default = uniform
  if alpha > 0 and max(self.ninc) > 1:
- tmp_f = numpy.empty(self.inc.shape[1], FLOAT_TYPE)
+ tmp_f = numpy.empty(self.inc.shape[1], float)
  for d in range(dim):
  old_ninc = self.ninc[d]
  if alpha != 0 and old_ninc > 1:
@@ -607,7 +599,7 @@
  self.n_f = None
 
  def show_grid(self, ngrid=40, axes=None, shrink=False, plotter=None):
- """ Display plots showing the current grid.
+ r""" Display plots showing the current grid.
 
  Args:
  ngrid (int): The number of grid nodes in each
@@ -734,7 +726,7 @@
  return plt
 
  def adapt_to_samples(self, x, f, nitn=5, alpha=1.0, nproc=1):
- """ Adapt map to data ``{x, f(x)}``.
+ r""" Adapt map to data ``{x, f(x)}``.
 
  Replace grid with one that is optimized for integrating
  function ``f(x)``. New grid is found iteratively
@@ -774,7 +766,7 @@
  on the machine (equivalent to ``nproc=os.cpu_count()``).
  Default value is ``nproc=1``. (Requires Python 3.3 or later.)
  """
- cdef numpy.npy_intp i, tmp_ninc, old_ninc
+ cdef Py_ssize_t i, tmp_ninc, old_ninc
  x = numpy.ascontiguousarray(x)
  if len(x.shape) != 2 or x.shape[1] != self.dim:
  raise ValueError('incompatible shape of x: {}'.format(x.shape))
@@ -840,7 +832,7 @@
  return (numpy.asarray(map.sum_f), numpy.asarray(map.n_f))
 
  cdef class Integrator(object):
- """ Adaptive multidimensional Monte Carlo integration.
+ r""" Adaptive multidimensional Monte Carlo integration.
 
  :class:`vegas.Integrator` objects make Monte Carlo
  estimates of multidimensional functions ``f(x)``
@@ -1030,16 +1022,28 @@
  is ``analyzer=None``.
  min_neval_batch (positive int): The minimum number of integration
  points to be passed together to the integrand when using
- |vegas| in batch mode. The default value is 50,000. Larger
+ |vegas| in batch mode. The default value is 100,000. Larger
  values may be lead to faster evaluations, but at the cost of
- more memory for internal work arrays. The last batch is
- usually smaller than this limit, as it is limited by ``neval``.
+ more memory for internal work arrays. Batch sizes are all smaller
+ than the lesser of ``min_neval_batch + max_neval_hcube`` and
+ ``neval``. The last batch is usually smaller than this limit,
+ as it is limited by ``neval``.
  max_neval_hcube (positive int): Maximum number of integrand
  evaluations per hypercube in the stratification. The default
  value is 50,000. Larger values might allow for more adaptation
  (when ``beta>0``), but also allow for more over-shoot when
  adapting to sharp peaks. Larger values also can result in
  large internal work arrasy.
+ gpu_pad (bool): If ``True``, |vegas| batches are padded so that
+ they are all the same size. The extra integrand evaluations
+ for integration points in the pad are discarded; increase
+ ``min_neval_batch`` or reduce ``max_neval_hcube`` to
+ decrease the number of evaluations that are discarded.
+ Padding is usually minimal when ``min_neval_batch`` is
+ equal to or larger than ``neval``. Padding can make
+ GPU-based integrands work much faster, but it makes
+ other types of integrand run more slowly.
+ Default is ``False``.
  minimize_mem (bool): When ``True``, |vegas| minimizes
  internal workspace by moving some of its data to
  a disk file. This increases execution time (slightly)
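
The new ``gpu_pad`` option and the larger ``min_neval_batch`` default documented in the hunk above are both set when the integrator is constructed. A minimal sketch, assuming only the parameters described in this diff (the integrand is a stand-in, not part of the release):

    import vegas

    def f(x):
        # stand-in integrand; x is a 1-d array of length dim
        return x[0] ** 2 + x[1] ** 2

    integ = vegas.Integrator(
        [[0, 1], [0, 1]],
        neval=100_000,
        min_neval_batch=100_000,  # new default in 6.3 (was 50,000)
        gpu_pad=True,             # new in 6.3: pad batches to a uniform size
    )
    result = integ(f, nitn=10)
    print(result.summary())

Per the docstring, padding mainly pays off for GPU-based batch integrands; for ordinary CPU integrands leaving ``gpu_pad=False`` avoids the discarded evaluations.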
@@ -1108,8 +1112,9 @@
  map=None, # integration region, AdaptiveMap, or Integrator
  neval=1000, # number of evaluations per iteration
  maxinc_axis=1000, # number of adaptive-map increments per axis
- min_neval_batch=50000, # min. number of evaluations per batch
+ min_neval_batch=100000, # min. number of evaluations per batch
  max_neval_hcube=50000, # max number of evaluations per h-cube
+ gpu_pad=False, # pad batches for use by GPUs
  neval_frac=0.75, # fraction of evaluations used for adaptive stratified sampling
  max_mem=1e9, # memory cutoff (# of floats)
  nitn=10, # number of iterations
@@ -1147,7 +1152,7 @@
  self.sum_sigf = numpy.sum(self.sigf)
  self.nstrat = numpy.array(map.nstrat)
  else:
- self.sigf = numpy.array([], FLOAT_TYPE) # reset sigf (dummy)
+ self.sigf = numpy.array([], float) # reset sigf (dummy)
  self.sum_sigf = HUGE
  args = dict(Integrator.defaults)
  if 'map' in args:
@@ -1174,11 +1179,11 @@
  self.sigf_h5.close()
  os.unlink(fname)
  self.sigf_h5 = None
- self.sigf = numpy.array([], FLOAT_TYPE) # reset sigf (dummy)
+ self.sigf = numpy.array([], float) # reset sigf (dummy)
  self.sum_sigf = HUGE
 
  def __reduce__(Integrator self not None):
- """ Capture state for pickling. """
+ r""" Capture state for pickling. """
  odict = dict()
  for k in Integrator.defaults:
  if k in ['map']:
@@ -1189,11 +1194,11 @@
  return (Integrator, (self.map,), odict)
 
  def __setstate__(Integrator self not None, odict):
- """ Set state for unpickling. """
+ r""" Set state for unpickling. """
  self.set(odict)
 
  def _set_map(self, map):
- """ install new map, create xsample """
+ r""" install new map, create xsample """
  if isinstance(map, AdaptiveMap):
  self.map = AdaptiveMap(map)
  self.xsample = numpy.empty(self.map.dim, dtype=float)
@@ -1240,7 +1245,7 @@
 
 
  def set(Integrator self not None, ka={}, **kargs):
- """ Reset default parameters in integrator.
+ r""" Reset default parameters in integrator.
 
  Usage is analogous to the constructor
  for |Integrator|: for example, ::
@@ -1376,7 +1381,7 @@
  # need to recalculate stratification distribution for beta>0
  # unless a new sigf was set
  old_val['sigf'] = self.sigf
- self.sigf = numpy.array([], FLOAT_TYPE) # reset sigf (dummy)
+ self.sigf = numpy.array([], float) # reset sigf (dummy)
  self.sum_sigf = HUGE
  self.nstrat = nstrat
 
@@ -1399,11 +1404,11 @@
 
  # neval_batch = self.nhcube_batch * avg_neval_hcube
  nsigf = self.nhcube
- if self.beta > 0 and self.nhcube > 1 and not self.adapt_to_errors and len(self.sigf) != nsigf:
+ if self.beta >= 0 and self.nhcube > 1 and not self.adapt_to_errors and len(self.sigf) != nsigf:
  # set up sigf
  self._clear_sigf_h5()
  if not self.minimize_mem:
- self.sigf = numpy.ones(nsigf, FLOAT_TYPE)
+ self.sigf = numpy.ones(nsigf, float)
  else:
  try:
  import h5py
@@ -1415,14 +1420,24 @@
  self.sum_sigf = nsigf
  self.neval_hcube = numpy.empty(self.min_neval_batch // 2 + 1, dtype=numpy.intp)
  self.neval_hcube[:] = avg_neval_hcube
- self.y = numpy.empty((self.min_neval_batch, self.dim), FLOAT_TYPE)
- self.x = numpy.empty((self.min_neval_batch, self.dim), FLOAT_TYPE)
- self.jac = numpy.empty(self.min_neval_batch, FLOAT_TYPE)
- self.fdv2 = numpy.empty(self.min_neval_batch, FLOAT_TYPE)
+ # allocate work space
+ # self.y = numpy.empty((self.min_neval_batch, self.dim), float)
+ # self.x = numpy.empty((self.min_neval_batch, self.dim), float)
+ # self.jac = numpy.empty(self.min_neval_batch, float)
+ # self.fdv2 = numpy.empty(self.min_neval_batch, float)
+ workspace = self.min_neval_batch + self.max_neval_hcube
+ if workspace > self.neval:
+     workspace = self.neval + 1
+ if (3*self.dim + 3) * workspace + (0 if self.minimize_mem else self.nhcube) > self.max_mem:
+     raise MemoryError('work arrays larger than max_mem; reduce min_neval_batch or max_neval_hcube (or increase max_mem)')
+ self.y = numpy.empty((workspace, self.dim), float)
+ self.x = numpy.empty((workspace, self.dim), float)
+ self.jac = numpy.empty(workspace, float)
+ self.fdv2 = numpy.empty(workspace, float)
  return old_val
 
  def settings(Integrator self not None, ngrid=0):
- """ Assemble summary of integrator settings into string.
+ r""" Assemble summary of integrator settings into string.
 
  Args:
  ngrid (int): Number of grid nodes in each direction
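
The memory guard added in the hunk above sizes the work arrays at (3*dim + 3) floats per point of workspace, where the workspace is the smaller of ``min_neval_batch + max_neval_hcube`` and ``neval + 1`` points. A small sketch of that estimate, with names taken from the diff and illustrative numbers only:

    def workspace_floats(dim, neval, min_neval_batch=100_000, max_neval_hcube=50_000,
                         nhcube=1, minimize_mem=False):
        # mirrors the check added in Integrator.set(): (3*dim + 3) floats per
        # workspace point, plus the sigf array when it is kept in memory
        workspace = min(min_neval_batch + max_neval_hcube, neval + 1)
        return (3 * dim + 3) * workspace + (0 if minimize_mem else nhcube)

    # e.g. dim=4, neval=1e6 stays far below the default max_mem=1e9 floats
    print(workspace_floats(dim=4, neval=1_000_000, nhcube=10_000))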
@@ -1431,7 +1446,7 @@
  Returns:
  String containing the settings.
  """
- cdef numpy.npy_intp d
+ cdef Py_ssize_t d
  nhcube = numpy.prod(self.nstrat)
  neval = nhcube * self.min_neval_hcube if self.beta <= 0 else self.neval
  ans = "Integrator Settings:\n"
@@ -1576,7 +1591,7 @@
  bint yield_y=False,
  # fcn = None,
  ):
- """ Low-level batch iterator over integration points and weights.
+ r""" Low-level batch iterator over integration points and weights.
 
  This method creates an iterator that returns integration
  points from |vegas|, and their corresponding weights in an
@@ -1602,24 +1617,41 @@
  corresponds to a single iteration. The number in a batch
  is controlled by parameter ``nhcube_batch``.
  """
- cdef numpy.npy_intp nhcube = numpy.prod(self.nstrat)
+ for t in self._random_batch(yield_hcube, yield_y):
+     yield tuple(numpy.array(ti) for ti in t)
+
+ def _random_batch(
+ Integrator self not None,
+ bint yield_hcube=False,
+ bint yield_y=False,
+ # fcn = None,
+ ):
+ r""" Underlying implementation of generator :meth:`Integrator.random_batch`.
+
+ Only difference from ``random_batch()`` is that the values for
+ ``x``, ``y``, etc. are returned here as memoryviews into internal buffers
+ that are overwritten by subsequent iterations. ``random_batch()`` returns
+ copies of the views that are not overwritten. ``_random_batch()`` is used
+ internally to minimize memory and memory churn.
+ """
+ cdef Py_ssize_t nhcube = numpy.prod(self.nstrat)
  cdef double dv_y = 1. / nhcube
- # cdef numpy.npy_intp min_neval_batch #= min(self.min_neval_batch, nhcube)
- cdef numpy.npy_intp neval_batch # self.neval_batch
- cdef numpy.npy_intp hcube_base
- cdef numpy.npy_intp i_start, ihcube, i, d, tmp_hcube, hcube
- cdef numpy.npy_intp[::1] hcube_array
+ # cdef Py_ssize_t min_neval_batch #= min(self.min_neval_batch, nhcube)
+ cdef Py_ssize_t neval_batch # self.neval_batch
+ cdef Py_ssize_t hcube_base
+ cdef Py_ssize_t i_start, ihcube, i, d, tmp_hcube, hcube
+ cdef Py_ssize_t[::1] hcube_array
  cdef double neval_sigf = (
  self.neval_frac * self.neval / self.sum_sigf
  if self.beta > 0 and self.sum_sigf > 0 and not self.adapt_to_errors
  else 0.0 # use min_neval_hcube (should not happen ever)
  )
- cdef numpy.npy_intp avg_neval_hcube = int(self.neval / self.nhcube)
- cdef numpy.npy_intp min_neval_batch = self.min_neval_batch # min_neval_batch * avg_neval_hcube ####
- cdef numpy.npy_intp max_nhcube_batch = min_neval_batch // 2 + 1 ####
- cdef numpy.npy_intp[::1] neval_hcube = self.neval_hcube
- cdef numpy.npy_intp[::1] y0 = numpy.empty(self.dim, numpy.intp)
- cdef numpy.npy_intp max_neval_hcube = max(
+ cdef Py_ssize_t avg_neval_hcube = int(self.neval / self.nhcube)
+ cdef Py_ssize_t min_neval_batch = self.min_neval_batch # min_neval_batch * avg_neval_hcube ####
+ cdef Py_ssize_t max_nhcube_batch = min_neval_batch // 2 + 1 ####
+ cdef Py_ssize_t[::1] neval_hcube = self.neval_hcube
+ cdef Py_ssize_t[::1] y0 = numpy.empty(self.dim, numpy.intp)
+ cdef Py_ssize_t max_neval_hcube = max(
  self.max_neval_hcube, self.min_neval_hcube
  )
  cdef double[::1] sigf
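
As the hunk above shows, the public ``random_batch()`` generator now just copies the memoryviews produced by the new internal ``_random_batch()``, so user code sees ordinary arrays as before. A typical use of the public iterator, accumulating one iteration's estimate by hand (a sketch only; the Gaussian integrand is illustrative):

    import numpy as np
    import vegas

    integ = vegas.Integrator(4 * [[0, 1]], neval=50_000)

    total = 0.0
    for x, wgt in integ.random_batch():
        # x[i, d] are integration points, wgt[i] the corresponding weights
        fx = np.exp(-np.sum(x ** 2, axis=1))
        total += np.sum(wgt * fx)
    print(total)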
@@ -1637,12 +1669,10 @@
  self.neval_hcube_range = numpy.zeros(2, numpy.intp) + self.min_neval_hcube
  if yield_hcube:
  hcube_array = numpy.empty(self.y.shape[0], numpy.intp)
- if adaptive_strat and self.minimize_mem and not self.adapt:
- # can't minimize_mem without also adapting, so force beta=0
- neval_sigf = 0.0
- # for hcube_base in range(0, nhcube, min_neval_batch):
- # if (hcube_base + min_neval_batch) > nhcube:
- # min_neval_batch = nhcube - hcube_base
+ # if adaptive_strat and self.minimize_mem and not self.adapt:
+ ##### believe this was wrong idea; want to preserve adaptive strat if it exists
+ # # can't minimize_mem without also adapting, so force beta=0
+ # neval_sigf = 0.0
  neval_batch = 0
  hcube_base = 0
  sigf = self.sigf[hcube_base:hcube_base + max_nhcube_batch]
@@ -1669,15 +1699,16 @@
  ############################## have enough points => build yields
  self.last_neval += neval_batch
  nhcube_batch = hcube - hcube_base + 1
- if (3*self.dim + 3) * neval_batch * 2 > self.max_mem:
- raise MemoryError('work arrays larger than max_mem; reduce min_neval_batch or max_neval_hcube (or increase max_mem)')
+ # if (3*self.dim + 3) * neval_batch * 2 > self.max_mem:
+ # raise MemoryError('work arrays larger than max_mem; reduce min_neval_batch or max_neval_hcube (or increase max_mem)')
 
  # 1) resize work arrays if needed (to double what is needed)
  if neval_batch > self.y.shape[0]:
- self.y = numpy.empty((2 * neval_batch, self.dim), FLOAT_TYPE)
- self.x = numpy.empty((2 * neval_batch, self.dim), FLOAT_TYPE)
- self.jac = numpy.empty(2 * neval_batch, FLOAT_TYPE)
- self.fdv2 = numpy.empty(2 * neval_batch, FLOAT_TYPE)
+ print("XXX - shouldn't get here ever")
+ self.y = numpy.empty((2 * neval_batch, self.dim), float)
+ self.x = numpy.empty((2 * neval_batch, self.dim), float)
+ self.jac = numpy.empty(2 * neval_batch, float)
+ self.fdv2 = numpy.empty(2 * neval_batch, float)
  y = self.y
  x = self.x
  jac = self.jac
@@ -1706,12 +1737,12 @@
  if yield_hcube:
  hcube_array[i] = hcube_base + ihcube
  i_start += neval_hcube[ihcube]
- answer = (numpy.asarray(x[:neval_batch, :]),)
+ answer = (x[:neval_batch, :],)
  if yield_y:
- answer += (numpy.asarray(y[:neval_batch, :]),)
- answer += (numpy.asarray(jac[:neval_batch]),)
+ answer += (y[:neval_batch, :],)
+ answer += (jac[:neval_batch],)
  if yield_hcube:
- answer += (numpy.asarray(hcube_array[:neval_batch]),)
+ answer += (hcube_array[:neval_batch],)
  yield answer
 
  # reset parameters for main loop
@@ -1726,7 +1757,7 @@
  def random(
  Integrator self not None, bint yield_hcube=False, bint yield_y=False
  ):
- """ Low-level iterator over integration points and weights.
+ r""" Low-level iterator over integration points and weights.
 
  This method creates an iterator that returns integration
  points from |vegas|, and their corresponding weights in an
@@ -1749,9 +1780,9 @@
  """
  cdef double[:, ::1] x
  cdef double[::1] wgt
- cdef numpy.npy_intp[::1] hcube
+ cdef Py_ssize_t[::1] hcube
  cdef double[:, ::1] y
- cdef numpy.npy_intp i
+ cdef Py_ssize_t i
  if yield_hcube and yield_y:
  for x, y, wgt, hcube in self.random_batch(yield_hcube=True, yield_y=True):
  for i in range(x.shape[0]):
@@ -1770,7 +1801,7 @@
  yield (x[i], wgt[i])
 
  def sample(self, nbatch=None, mode='rbatch'):
- """ Generate random sample of integration weights and points.
+ r""" Generate random sample of integration weights and points.
 
  Given a :class:`vegas.Integrator` called ``integ``, the code ::
 
@@ -1841,7 +1872,7 @@
  gvar.ranseed(seed)
 
  def _make_std_integrand(self, fcn, xsample=None):
- """ Convert integrand ``fcn`` into an lbatch integrand.
+ r""" Convert integrand ``fcn`` into an lbatch integrand.
 
  Returns an object ``vi`` of type :class:`VegasIntegrand`.
  This object converts an arbitrary integrand ``fcn`` (``lbatch`, `rbatch`,
@@ -1866,7 +1897,7 @@
  )
 
  def __call__(Integrator self not None, fcn, save=None, saveall=None, **kargs):
- """ Integrate integrand ``fcn``.
+ r""" Integrate integrand ``fcn``.
 
  A typical integrand has the form, for example::
 
@@ -1974,10 +2005,11 @@
  an object of type :class:`vegas.RAvg`,
  :class:`vegas.RAvgArray`, or :class:`vegas.RAvgDict`.
  """
- cdef numpy.ndarray[numpy.double_t, ndim=2] x
- cdef numpy.ndarray[numpy.double_t, ndim=2] jac
- cdef numpy.ndarray[numpy.double_t, ndim=1] wgt
- cdef numpy.ndarray[numpy.npy_intp, ndim=1] hcube
+ cdef double[:, ::1] x
+ # cdef double[:, ::1] jac
+ cdef double[::1] wgt
+ cdef Py_ssize_t[::1] hcube
+
  cdef double[::1] sigf
  cdef double[:, ::1] y
  cdef double[::1] fdv2
@@ -1986,15 +2018,16 @@
  cdef double[::1] sum_wf
  cdef double[::1] sum_dwf
  cdef double[:, ::1] sum_dwf2
- cdef double[::1] mean = numpy.empty(1, FLOAT_TYPE)
- cdef double[:, ::1] var = numpy.empty((1, 1), FLOAT_TYPE)
- cdef numpy.npy_intp itn, i, j, jtmp, s, t, neval, fcn_size, len_hcube
+ cdef double[::1] mean = numpy.empty(1, float)
+ cdef double[:, ::1] var = numpy.empty((1, 1), float)
+ cdef Py_ssize_t itn, i, j, jtmp, s, t, neval, fcn_size, len_hcube
  cdef bint adaptive_strat
  cdef double sum_sigf, sigf2
  cdef bint firsteval = True
-
+ cdef bint gpu_pad
  if kargs:
  self.set(kargs)
+ gpu_pad = self.gpu_pad and (self.beta != 0) and (self.adapt == True)
  if self.nproc > 1:
  old_defaults = self.set(mpi=False, min_neval_batch=self.nproc * self.min_neval_batch)
  elif self.mpi:
@@ -2018,12 +2051,12 @@
  fcn_size = fcn.size
 
  # allocate work arrays
- dwf = numpy.empty(fcn_size, FLOAT_TYPE)
- sum_wf = numpy.empty(fcn_size, FLOAT_TYPE)
- sum_dwf = numpy.empty(fcn_size, FLOAT_TYPE)
- sum_dwf2 = numpy.empty((fcn_size, fcn_size), FLOAT_TYPE)
- mean = numpy.empty(fcn_size, FLOAT_TYPE)
- var = numpy.empty((fcn_size, fcn_size), FLOAT_TYPE)
+ dwf = numpy.empty(fcn_size, float)
+ sum_wf = numpy.empty(fcn_size, float)
+ sum_dwf = numpy.empty(fcn_size, float)
+ sum_dwf2 = numpy.empty((fcn_size, fcn_size), float)
+ mean = numpy.empty(fcn_size, float)
+ var = numpy.empty((fcn_size, fcn_size), float)
  mean[:] = 0.0
  var[:, :] = 0.0
  result = VegasResult(fcn, weighted=self.adapt)
@@ -2038,35 +2071,42 @@
  sum_sigf = 0.0
 
  # iterate batch-slices of integration points
- for x, y, wgt, hcube in self.random_batch(
+ for x, y, wgt, hcube in self._random_batch(
  yield_hcube=True, yield_y=True, #fcn=fcn
  ):
  fdv2 = self.fdv2 # must be inside loop
  len_hcube = len(hcube)
 
  # evaluate integrand at all points in x
+ if gpu_pad:
+     xa = numpy.asarray(self.x)
+     xa[len(x):] = xa[len(x) - 1]
+ else:
+     xa = numpy.asarray(x)
  if self.nproc > 1:
  nx = x.shape[0] // self.nproc + 1
  if self.uses_jac:
- jac = self.map.jac1d(y)
+ jac1d = self.map.jac1d(y)
  results = self.pool.starmap(
  fcn.eval,
- [(x[i*nx : (i+1)*nx], jac[i*nx : (i+1)*nx]) for i in range(self.nproc) if i*nx < x.shape[0]],
+ [(xa[i*nx : (i+1)*nx], jac1d[i*nx : (i+1)*nx]) for i in range(self.nproc) if i*nx < xa.shape[0]],
  1,
  )
  else:
  results = self.pool.starmap(
  fcn.eval,
- [(x[i*nx : (i+1)*nx], None) for i in range(self.nproc) if i*nx < x.shape[0]],
+ [(xa[i*nx : (i+1)*nx], None) for i in range(self.nproc) if i*nx < xa.shape[0]],
  1,
  )
  fx = numpy.concatenate(results, axis=0, dtype=float)
  else:
  # fx = fcn.eval(x, jac=self.map.jac1d(y) if self.uses_jac else None)
  fx = numpy.asarray(
- fcn.eval(x, jac=self.map.jac1d(y) if self.uses_jac else None),
+ fcn.eval(xa, jac=self.map.jac1d(y) if self.uses_jac else None),
  dtype=float
  )
+ if gpu_pad:
+     fx = fx[:len(x)]
  # sanity check
  if numpy.any(numpy.isnan(fx)):
  raise ValueError('integrand evaluates to nan')
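
The padding logic in the hunk above fills the unused tail of the ``self.x`` work array with a copy of the last integration point before ``fcn.eval`` is called, then discards the corresponding entries of ``fx``; the ``nproc > 1`` branch splits that padded array across a multiprocessing pool. Roughly, the padding step is equivalent to the following sketch (plain numpy names, hypothetical ``pad_batch`` helper, not the internal buffers):

    import numpy as np

    def pad_batch(x_batch, workspace_len):
        # replicate the last point so every batch has the same length; the
        # integrand values for the padded rows are thrown away afterwards
        xa = np.empty((workspace_len, x_batch.shape[1]))
        xa[:len(x_batch)] = x_batch
        xa[len(x_batch):] = x_batch[-1]
        return xa

    x_batch = np.random.rand(7, 3)      # illustrative batch of 7 points in 3 dimensions
    xa = pad_batch(x_batch, 10)
    fx = np.exp(-np.sum(xa ** 2, axis=1))
    fx = fx[:len(x_batch)]              # discard the padded evaluations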
@@ -2155,7 +2195,7 @@
  return result.result
 
  class reporter:
- """ Analyzer class that prints out a report, iteration
+ r""" Analyzer class that prints out a report, iteration
  by interation, on how vegas is doing. Parameter ngrid
  specifies how many x[i]'s to print out from the maps
  for each axis.
@@ -2199,7 +2239,7 @@
  # average of the results of all iterations (unless parameter weigthed=False,
  # in which case the average is unweighted).
  class RAvg(gvar.GVar):
- """ Running average of scalar-valued Monte Carlo estimates.
+ r""" Running average of scalar-valued Monte Carlo estimates.
 
  This class accumulates independent Monte Carlo
  estimates (e.g., of an integral) and combines
@@ -2237,7 +2277,7 @@
  self.sum_neval = sum_neval
 
  def extend(self, ravg):
- """ Merge results from :class:`RAvg` object ``ravg`` after results currently in ``self``. """
+ r""" Merge results from :class:`RAvg` object ``ravg`` after results currently in ``self``. """
  for r in ravg.itn_results:
  self.add(r)
  self.sum_neval += ravg.sum_neval
@@ -2315,7 +2355,7 @@
  return self.sdev < atol + rtol * abs(self.mean)
 
  def add(self, g):
- """ Add estimate ``g`` to the running average. """
+ r""" Add estimate ``g`` to the running average. """
  self.itn_results.append(g)
  if isinstance(g, gvar.GVarRef):
  return
@@ -2335,7 +2375,7 @@
  super(RAvg, self).__init__(*gvar.gvar(mean, numpy.sqrt(var)).internaldata)
 
  def summary(self, extended=False, weighted=None):
- """ Assemble summary of results, iteration-by-iteration, into a string.
+ r""" Assemble summary of results, iteration-by-iteration, into a string.
 
  Args:
  weighted (bool): Display weighted averages of results from different
@@ -2376,7 +2416,7 @@
  return ans
 
  class RAvgDict(gvar.BufferDict):
- """ Running average of dictionary-valued Monte Carlo estimates.
+ r""" Running average of dictionary-valued Monte Carlo estimates.
 
  This class accumulates independent dictionaries of Monte Carlo
  estimates (e.g., of an integral) and combines
@@ -2404,7 +2444,7 @@
  self.sum_neval = sum_neval
 
  def extend(self, ravg):
- """ Merge results from :class:`RAvgDict` object ``ravg`` after results currently in ``self``. """
+ r""" Merge results from :class:`RAvgDict` object ``ravg`` after results currently in ``self``. """
  for r in ravg.itn_results:
  self.add(r)
  self.sum_neval += ravg.sum_neval
@@ -2453,7 +2493,7 @@
  self.rarray.add(newg.buf)
 
  def summary(self, extended=False, weighted=None, rescale=None):
- """ Assemble summary of results, iteration-by-iteration, into a string.
+ r""" Assemble summary of results, iteration-by-iteration, into a string.
 
  Args:
  extended (bool): Include a table of final averages for every
@@ -2502,7 +2542,7 @@
  rescale = property(_get_rescale, None, None, "Integrals divided by ``rescale`` before doing weighted averages.")
 
  class RAvgArray(numpy.ndarray):
- """ Running average of array-valued Monte Carlo estimates.
+ r""" Running average of array-valued Monte Carlo estimates.
 
  This class accumulates independent arrays of Monte Carlo
  estimates (e.g., of an integral) and combines
@@ -2632,7 +2672,7 @@
  self.add(r)
 
  def extend(self, ravg):
- """ Merge results from :class:`RAvgArray` object ``ravg`` after results currently in ``self``. """
+ r""" Merge results from :class:`RAvgArray` object ``ravg`` after results currently in ``self``. """
  for r in ravg.itn_results:
  self.add(r)
  self.sum_neval += ravg.sum_neval
@@ -2707,7 +2747,7 @@
  avg_neval = property(_avg_neval, None, None, "Average number of integrand evaluations per iteration.")
 
  def add(self, g):
- """ Add estimate ``g`` to the running average. """
+ r""" Add estimate ``g`` to the running average. """
  g = numpy.asarray(g)
  self.itn_results.append(g)
  if g.size > 1 and isinstance(g.flat[0], gvar.GVarRef):
@@ -2756,7 +2796,7 @@
  self[:] = gvar.gvar(mean, cov).reshape(self.shape)
 
  def summary(self, extended=False, weighted=None, rescale=None):
- """ Assemble summary of results, iteration-by-iteration, into a string.
+ r""" Assemble summary of results, iteration-by-iteration, into a string.
 
  Args:
  extended (bool): Include a table of final averages for every
@@ -2874,7 +2914,7 @@
  self.result.sum_neval = self.sum_neval
 
  def update_analyzer(self, analyzer):
- """ Update analyzer at end of an iteration. """
+ r""" Update analyzer at end of an iteration. """
  analyzer.end(self.result.itn_results[-1], self.result)
 
  def converged(self, rtol, atol):
@@ -2884,7 +2924,7 @@
  cdef class VegasIntegrand:
  cdef public object shape
  cdef public object fcntype
- cdef public numpy.npy_intp size
+ cdef public Py_ssize_t size
  cdef public object eval
  cdef public object bdict
  cdef public int mpi_nproc # number of MPI processors
@@ -3016,14 +3056,14 @@
  nx = x.shape[0] // self.mpi_nproc + 1
  i0 = self.rank * nx
  i1 = min(i0 + nx, x.shape[0])
- f = numpy.empty((nx, self.size), FLOAT_TYPE)
+ f = numpy.empty((nx, self.size), float)
  if i1 > i0:
  # fill f so long as haven't gone off end
  if jac is None:
  f[:(i1-i0)] = _eval(x[i0:i1], jac=None)
  else:
  f[:(i1-i0)] = _eval(x[i0:i1], jac=jac[i0:i1])
- results = numpy.empty((self.mpi_nproc * nx, self.size), FLOAT_TYPE)
+ results = numpy.empty((self.mpi_nproc * nx, self.size), float)
  self.comm.Allgather(f, results)
  return results[:x.shape[0]]
  self.eval = _mpi_eval
@@ -3039,7 +3079,7 @@
  self.eval = gvar.distribute_gvars(self.eval, gvlist)
 
  def __call__(self, x, jac=None):
- """ Non-batch version of fcn """
+ r""" Non-batch version of fcn """
  # repack x as lbatch array and evaluate function via eval
  if hasattr(x, 'keys'):
  x = gvar.asbufferdict(x)
@@ -3050,7 +3090,7 @@
  return self.format_result(fx)
 
  def format_result(self, mean, var=None):
- """ Reformat output from integrator to correspond to original output format """
+ r""" Reformat output from integrator to correspond to original output format """
  if var is None:
  # mean is an ndarray
  if self.shape is None:
@@ -3069,7 +3109,7 @@
  return gvar.gvar(mean, var).reshape(self.shape)
 
  def format_evalx(self, evalx):
- """ Reformat output from eval(x).
+ r""" Reformat output from eval(x).
 
  ``self.eval(x)`` returns an array ``evalx[i,d]`` where ``i`` is the batch index and ``d``
  labels different components of the ``self.fcn`` output. ``self.format_evalx(evalx)``
@@ -3082,8 +3122,8 @@
  return evalx.reshape(evalx.shape[:1] + self.shape)
 
  def training(self, x, jac):
- """ Calculate first element of integrand at point ``x``. """
- cdef numpy.ndarray fx =self.eval(x, jac=jac)
+ r""" Calculate first element of integrand at point ``x``. """
+ fx =self.eval(x, jac=jac)
  if fx.ndim == 1:
  return fx
  else:
@@ -3119,8 +3159,9 @@
  def _distribute_gvars(self, gvlist):
  self.fcn = gvar.distribute_gvars(self.fcn, gvlist)
 
- def non_std_arg_fcn(self, numpy.ndarray[numpy.double_t, ndim=1] x, jac=None):
+ def non_std_arg_fcn(self, x, jac=None):
  " fcn(x) for non-standard non-batch functions "
+ x = numpy.asarray(x)
  if self.dict_arg:
  xd = gvar.BufferDict(self.xsample, buf=x)
  if jac is not None:
@@ -3133,8 +3174,9 @@
  else:
  return self.fcn(x.reshape(self.xsample.shape))
 
- def non_std_arg_batch_fcn(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
+ def non_std_arg_batch_fcn(self, x, jac=None):
  " fcn(x) for non-standard batch functions "
+ x = numpy.asarray(x)
  if self.dict_arg:
  if self.rbatch:
  xd = gvar.BufferDict(self.xsample, rbatch_buf=x.T)
@@ -3154,7 +3196,7 @@
  return self.fcn(x.reshape(sh)) if jac is None else self.fcn(x.reshape(sh), jac=jac.reshape(sh))
 
  cdef class _BatchIntegrand_from_NonBatch(_BatchIntegrand_from_Base):
- cdef readonly numpy.npy_intp size
+ cdef readonly Py_ssize_t size
  cdef readonly object shape
  """ Batch integrand from non-batch integrand. """
 
@@ -3163,52 +3205,60 @@
  self.shape = shape
  super(_BatchIntegrand_from_NonBatch, self).__init__(fcn, xsample)
 
- def __call__(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
- cdef numpy.npy_intp i
- cdef numpy.ndarray[numpy.float_t, ndim=2] f = numpy.empty(
- (x.shape[0], self.size), FLOAT_TYPE
+ def __call__(self, double[:, :] x, jac=None):
+ cdef Py_ssize_t i, j
+ cdef double[:, ::1] f
+ cdef const double[:] fx
+ _f = numpy.empty(
+ (x.shape[0], self.size), float
  )
+ f = _f
  if self.shape == ():
  # very common special case
  for i in range(x.shape[0]):
- if self.std_arg:
- f[i] = self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
- else:
- f[i] = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
+ if self.std_arg:
+ f[i, 0] = self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
+ else:
+ f[i, 0] = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
  else:
  for i in range(x.shape[0]):
  if self.std_arg:
- f[i] = numpy.asarray(
- self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
+ fx = numpy.asarray(
+ self.fcn(numpy.asarray(x[i])) if jac is None else self.fcn(numpy.asarray(x[i]), jac=jac[i])
  ).reshape((-1,))
  else:
- f[i] = numpy.asarray(
+ fx = numpy.asarray(
  self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
  ).reshape((-1,))
- return f
+ for j in range(len(fx)):
+ f[i, j] = fx[j]
+ return _f
 
  cdef class _BatchIntegrand_from_NonBatchDict(_BatchIntegrand_from_Base):
- cdef readonly numpy.npy_intp size
+ cdef readonly Py_ssize_t size
  """ Batch integrand from non-batch dict-integrand. """
 
  def __init__(self, fcn, size, xsample=None):
  self.size = size
  super(_BatchIntegrand_from_NonBatchDict, self).__init__(fcn, xsample)
 
- def __call__(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
- cdef numpy.npy_intp i
- cdef numpy.ndarray[numpy.double_t, ndim=2] f = numpy.empty(
+ def __call__(self, double[:, :] x, jac=None):
+ cdef Py_ssize_t i, j
+ cdef double[:, ::1] f
+ _f = numpy.empty(
  (x.shape[0], self.size), float
  )
+ f = _f
  for i in range(x.shape[0]):
  if self.std_arg:
- fx = self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
+ fx = self.fcn(numpy.asarray(x[i])) if jac is None else self.fcn(numpy.asarray(x[i]), jac=jac[i])
  else:
  fx = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
  if not isinstance(fx, gvar.BufferDict):
  fx = gvar.BufferDict(fx)
- f[i] = fx.buf
- return f
+ for j in range(f.shape[1]):
+ f[i, j] = fx.buf[j]
+ return _f
 
  cdef class _BatchIntegrand_from_Batch(_BatchIntegrand_from_Base):
  cdef readonly object shape
@@ -3219,7 +3269,7 @@
  self.rbatch = rbatch
  super(_BatchIntegrand_from_Batch, self).__init__(fcn, xsample)
 
- def __call__(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
+ def __call__(self, x, jac=None):
  # call fcn(x)
  if self.std_arg:
  if self.rbatch:
@@ -3244,7 +3294,7 @@
 
 
  cdef class _BatchIntegrand_from_BatchDict(_BatchIntegrand_from_Base):
- cdef readonly numpy.npy_intp size
+ cdef readonly Py_ssize_t size
  cdef readonly object slice
  cdef readonly object shape
  cdef readonly bint rbatch
@@ -3259,11 +3309,13 @@
  self.slice[k], self.shape[k] = bdict.slice_shape(k)
  super(_BatchIntegrand_from_BatchDict, self).__init__(fcn, xsample)
 
- def __call__(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
- cdef numpy.npy_intp i
- cdef numpy.ndarray[numpy.double_t, ndim=2] buf = numpy.empty(
+ def __call__(self, x, jac=None):
+ cdef Py_ssize_t i
+ # cdef double[:, ::1] buf
+ buf = numpy.empty(
  (x.shape[0], self.size), float
  )
+ # buf = _buf
  # call fcn(x)
  if self.std_arg:
  if self.rbatch:
@@ -3294,7 +3346,7 @@
 
  # LBatchIntegrand and RBatchIntegrand are container classes for batch integrands.
  cdef class LBatchIntegrand(object):
- """ Wrapper for lbatch integrands.
+ r""" Wrapper for lbatch integrands.
 
  Used by :func:`vegas.lbatchintegrand`.
 
@@ -3316,7 +3368,7 @@
  return getattr(self.fcn, attr)
 
  def lbatchintegrand(f):
- """ Decorator for batch integrand functions.
+ r""" Decorator for batch integrand functions.
 
  Applying :func:`vegas.lbatchintegrand` to a function ``fcn`` repackages
  the function in a format that |vegas| can understand. Appropriate
@@ -3348,7 +3400,7 @@
  return LBatchIntegrand(f)
 
  cdef class RBatchIntegrand(object):
- """ Same as :class:`vegas.LBatchIntegrand` but with batch indices on the right (not left). """
+ r""" Same as :class:`vegas.LBatchIntegrand` but with batch indices on the right (not left). """
  # cdef public object fcn
  def __init__(self, fcn=None):
  self.fcn = self if fcn is None else fcn
@@ -3365,7 +3417,7 @@
 
 
  def rbatchintegrand(f):
- """ Same as :func:`vegas.lbatchintegrand` but with batch indices on the right (not left). """
+ r""" Same as :func:`vegas.lbatchintegrand` but with batch indices on the right (not left). """
  try:
  f.fcntype = 'rbatch'
  return f
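
The decorators whose docstrings are touched in the last few hunks wrap batch integrands for vegas: the batch index sits on the left for ``lbatchintegrand`` and on the right for ``rbatchintegrand``. A brief sketch of both conventions (the Gaussian integrand is illustrative only):

    import numpy as np
    import vegas

    @vegas.lbatchintegrand
    def f_lbatch(x):          # x[i, d]: batch index on the left
        return np.exp(-np.sum(x ** 2, axis=1))

    @vegas.rbatchintegrand
    def f_rbatch(x):          # x[d, i]: batch index on the right
        return np.exp(-np.sum(x ** 2, axis=0))

    integ = vegas.Integrator(3 * [[0, 1]])
    print(integ(f_lbatch, nitn=10, neval=20_000).summary())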