vegas 6.1.3-cp310-cp310-win_amd64.whl → 6.2.1-cp310-cp310-win_amd64.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of vegas might be problematic.

vegas/_vegas.pyx CHANGED
@@ -2,7 +2,7 @@
 # c#ython: profile=True
 
 # Created by G. Peter Lepage (Cornell University) in 12/2013.
-# Copyright (c) 2013-24 G. Peter Lepage.
+# Copyright (c) 2013-25 G. Peter Lepage.
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -14,13 +14,10 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 
-cimport cython
-cimport numpy
-from libc.math cimport floor, log, abs, tanh, erf, exp, sqrt, lgamma
+from libc.math cimport floor, log, abs, tanh, erf, exp, sqrt
 
 import collections
 import copy
-import functools
 import inspect
 import math
 import multiprocessing
@@ -34,18 +31,13 @@ import warnings
 import numpy
 import gvar
 
-if numpy.version.version >= '2.0':
-    FLOAT_TYPE = numpy.float64
-else:
-    FLOAT_TYPE = numpy.float_
-
 cdef double TINY = 10 ** (sys.float_info.min_10_exp + 50) # smallest and biggest
 cdef double HUGE = 10 ** (sys.float_info.max_10_exp - 50) # with extra headroom
 cdef double EPSILON = sys.float_info.epsilon * 1e4 # roundoff error threshold (see Schubert and Gertz Table 2)
 
 # AdaptiveMap is used by Integrator
 cdef class AdaptiveMap:
-    """ Adaptive map ``y->x(y)`` for multidimensional ``y`` and ``x``.
+    r""" Adaptive map ``y->x(y)`` for multidimensional ``y`` and ``x``.
 
     An :class:`AdaptiveMap` defines a multidimensional map ``y -> x(y)``
     from the unit hypercube, with ``0 <= y[d] <= 1``, to an arbitrary
@@ -118,7 +110,7 @@ cdef class AdaptiveMap:
         ``ninc=None``, leaves the grid unchanged.
     """
     def __init__(self, grid, ninc=None):
-        cdef numpy.npy_intp i, d, dim
+        cdef Py_ssize_t i, d, dim
         cdef double griddi
         if isinstance(grid, AdaptiveMap):
             self.ninc = numpy.array(grid.ninc)
@@ -149,8 +141,8 @@ cdef class AdaptiveMap:
         def __get__(self):
             return self.grid.shape[0]
 
-    def region(self, numpy.npy_intp d=-1):
-        """ x-space region.
+    def region(self, Py_ssize_t d=-1):
+        r""" x-space region.
 
         ``region(d)`` returns a tuple ``(xl,xu)`` specifying the ``x``-space
         interval covered by the map in direction ``d``. A list containing
@@ -163,7 +155,7 @@ cdef class AdaptiveMap:
 
     def extract_grid(self):
         " Return a list of lists specifying the map's grid. "
-        cdef numpy.npy_intp d
+        cdef Py_ssize_t d
        grid = []
        for d in range(self.dim):
            ng = self.ninc[d] + 1
@@ -171,18 +163,18 @@ cdef class AdaptiveMap:
         return grid
 
     def __reduce__(self):
-        """ Capture state for pickling. """
+        r""" Capture state for pickling. """
         return (AdaptiveMap, (self.extract_grid(),))
 
     def settings(self, ngrid=5):
-        """ Create string with information about grid nodes.
+        r""" Create string with information about grid nodes.
 
         Creates a string containing the locations of the nodes
         in the map grid for each direction. Parameter
         ``ngrid`` specifies the maximum number of nodes to print
         (spread evenly over the grid).
         """
-        cdef numpy.npy_intp d
+        cdef Py_ssize_t d
         ans = []
         if ngrid > 0:
             for d in range(self.dim):
@@ -211,15 +203,15 @@ cdef class AdaptiveMap:
         return self(y)
 
     def make_uniform(self, ninc=None):
-        """ Replace the grid with a uniform grid.
+        r""" Replace the grid with a uniform grid.
 
         The new grid has ``ninc[d]`` (or ``ninc``, if it is a number)
         increments along each direction if ``ninc`` is specified.
         If ``ninc=None`` (default), the new grid has the same number
         of increments in each direction as the old grid.
         """
-        cdef numpy.npy_intp i, d
-        cdef numpy.npy_intp dim = self.grid.shape[0]
+        cdef Py_ssize_t i, d
+        cdef Py_ssize_t dim = self.grid.shape[0]
         cdef double[:] tmp
         cdef double[:, ::1] new_grid
         if ninc is None:
@@ -235,8 +227,8 @@ cdef class AdaptiveMap:
                 "no of increments < 1 in AdaptiveMap -- %s"
                 % str(ninc)
                 )
-        new_inc = numpy.empty((dim, max(ninc)), FLOAT_TYPE)
-        new_grid = numpy.empty((dim, new_inc.shape[1] + 1), FLOAT_TYPE)
+        new_inc = numpy.empty((dim, max(ninc)), float)
+        new_grid = numpy.empty((dim, new_inc.shape[1] + 1), float)
         for d in range(dim):
             tmp = numpy.linspace(self.grid[d, 0], self.grid[d, self.ninc[d]], ninc[d] + 1)
             for i in range(ninc[d] + 1):
@@ -249,7 +241,7 @@ cdef class AdaptiveMap:
         self.clear()
 
     def __call__(self, y):
-        """ Return ``x`` values corresponding to ``y``.
+        r""" Return ``x`` values corresponding to ``y``.
 
         ``y`` can be a single ``dim``-dimensional point, or it
         can be an array ``y[i,j, ..., d]`` of such points (``d=0..dim-1``).
@@ -261,17 +253,17 @@ cdef class AdaptiveMap:
         if y is None:
             y = gvar.RNG.random(size=self.dim)
         else:
-            y = numpy.asarray(y, FLOAT_TYPE)
+            y = numpy.asarray(y, float)
         y_shape = y.shape
         y.shape = -1, y.shape[-1]
         x = 0 * y
-        jac = numpy.empty(y.shape[0], FLOAT_TYPE)
+        jac = numpy.empty(y.shape[0], float)
         self.map(y, x, jac)
         x.shape = y_shape
         return x
 
     def jac1d(self, y):
-        """ Return the map's Jacobian at ``y`` for each direction.
+        r""" Return the map's Jacobian at ``y`` for each direction.
 
         ``y`` can be a single ``dim``-dimensional point, or it
         can be an array ``y[i,j,...,d]`` of such points (``d=0..dim-1``).
@@ -279,15 +271,15 @@ cdef class AdaptiveMap:
         (one-dimensional) Jacobian (``dx[d]/dy[d]``) corresponding
         to ``y[i,j,...,d]``.
         """
-        cdef numpy.npy_intp dim = self.grid.shape[0]
-        cdef numpy.npy_intp i, d, ninc, ny, iy
+        cdef Py_ssize_t dim = self.grid.shape[0]
+        cdef Py_ssize_t i, d, ninc, ny, iy
         cdef double y_ninc, dy_ninc
         cdef double[:,::1] jac
         y = numpy.asarray(y)
         y_shape = y.shape
         y.shape = -1, y.shape[-1]
         ny = y.shape[0]
-        jac = numpy.empty(y.shape, FLOAT_TYPE)
+        jac = numpy.empty(y.shape, float)
         for i in range(ny):
             for d in range(dim):
                 ninc = self.ninc[d]
@@ -303,7 +295,7 @@ cdef class AdaptiveMap:
         return ans
 
     def jac(self, y):
-        """ Return the map's Jacobian at ``y``.
+        r""" Return the map's Jacobian at ``y``.
 
         ``y`` can be a single ``dim``-dimensional point, or it
         can be an array ``y[i,j,...,d]`` of such points (``d=0..dim-1``).
@@ -320,9 +312,9 @@ cdef class AdaptiveMap:
         double[:, ::1] y,
         double[:, ::1] x,
         double[::1] jac,
-        numpy.npy_intp ny=-1
+        Py_ssize_t ny=-1
         ):
-        """ Map y to x, where jac is the Jacobian (``dx/dy``).
+        r""" Map y to x, where jac is the Jacobian (``dx/dy``).
 
         ``y[j, d]`` is an array of ``ny`` ``y``-values for direction ``d``.
         ``x[j, d]`` is filled with the corresponding ``x`` values,
@@ -344,9 +336,9 @@ cdef class AdaptiveMap:
         and ``j=0...ny-1``. ``ny`` is set to ``y.shape[0]`` if it is
         omitted (or negative).
         """
-        cdef numpy.npy_intp ninc
-        cdef numpy.npy_intp dim = self.inc.shape[0]
-        cdef numpy.npy_intp i, iy, d
+        cdef Py_ssize_t ninc
+        cdef Py_ssize_t dim = self.inc.shape[0]
+        cdef Py_ssize_t i, iy, d
         cdef double y_ninc, dy_ninc, tmp_jac
         if ny < 0:
             ny = y.shape[0]
@@ -372,9 +364,9 @@ cdef class AdaptiveMap:
         double[:, ::1] x,
         double[:, ::1] y,
         double[::1] jac,
-        numpy.npy_intp nx=-1
+        Py_ssize_t nx=-1
         ):
-        """ Map x to y, where jac is the Jacobian (``dx/dy``).
+        r""" Map x to y, where jac is the Jacobian (``dx/dy``).
 
         ``y[j, d]`` is an array of ``ny`` ``y``-values for direction ``d``.
         ``x[j, d]`` is filled with the corresponding ``x`` values,
@@ -396,10 +388,10 @@ cdef class AdaptiveMap:
         and ``j=0...nx-1``. ``nx`` is set to ``x.shape[0]`` if it is
         omitted (or negative).
         """
-        cdef numpy.npy_intp ninc
-        cdef numpy.npy_intp dim = self.inc.shape[0]
-        cdef numpy.npy_intp[:] iy
-        cdef numpy.npy_intp i, iyi, d
+        cdef Py_ssize_t ninc
+        cdef Py_ssize_t dim = self.inc.shape[0]
+        cdef Py_ssize_t[:] iy
+        cdef Py_ssize_t i, iyi, d
         cdef double y_ninc, dy_ninc, tmp_jac
         if nx < 0:
             nx = x.shape[0]
@@ -430,9 +422,9 @@ cdef class AdaptiveMap:
         self,
         double[:, ::1] y,
         double[::1] f,
-        numpy.npy_intp ny=-1,
+        Py_ssize_t ny=-1,
         ):
-        """ Add training data ``f`` for ``y``-space points ``y``.
+        r""" Add training data ``f`` for ``y``-space points ``y``.
 
         Accumulates training data for later use by ``self.adapt()``.
         Grid increments will be made smaller in regions where
@@ -450,14 +442,14 @@ cdef class AdaptiveMap:
         and ``j=0...ny-1``. ``ny`` is set to ``y.shape[0]`` if it is
         omitted (or negative).
         """
-        cdef numpy.npy_intp ninc
-        cdef numpy.npy_intp dim = self.inc.shape[0]
-        cdef numpy.npy_intp iy
-        cdef numpy.npy_intp i, d
+        cdef Py_ssize_t ninc
+        cdef Py_ssize_t dim = self.inc.shape[0]
+        cdef Py_ssize_t iy
+        cdef Py_ssize_t i, d
         if self.sum_f is None:
             shape = (self.inc.shape[0], self.inc.shape[1])
-            self.sum_f = numpy.zeros(shape, FLOAT_TYPE)
-            self.n_f = numpy.zeros(shape, FLOAT_TYPE) + TINY
+            self.sum_f = numpy.zeros(shape, float)
+            self.n_f = numpy.zeros(shape, float) + TINY
         if ny < 0:
             ny = y.shape[0]
         elif ny > y.shape[0]:
@@ -473,7 +465,7 @@ cdef class AdaptiveMap:
 
     # @cython.boundscheck(False)
    def adapt(self, double alpha=0.0, ninc=None):
-        """ Adapt grid to accumulated training data.
+        r""" Adapt grid to accumulated training data.
 
         ``self.adapt(...)`` projects the training data onto
         each axis independently and maps it into ``x`` space.
@@ -508,10 +500,10 @@ cdef class AdaptiveMap:
         cdef double[:, ::1] new_grid
         cdef double[::1] avg_f, tmp_f
         cdef double sum_f, acc_f, f_ninc
-        cdef numpy.npy_intp old_ninc
-        cdef numpy.npy_intp dim = self.grid.shape[0]
-        cdef numpy.npy_intp i, j
-        cdef numpy.npy_intp[:] new_ninc
+        cdef Py_ssize_t old_ninc
+        cdef Py_ssize_t dim = self.grid.shape[0]
+        cdef Py_ssize_t i, j
+        cdef Py_ssize_t[:] new_ninc
 
         # initialization
         if ninc is None:
@@ -525,12 +517,12 @@ cdef class AdaptiveMap:
         if min(new_ninc) < 1:
             raise ValueError('ninc < 1: ' + str(list(new_ninc)))
         if max(new_ninc) == 1:
-            new_grid = numpy.empty((dim, 2), FLOAT_TYPE)
+            new_grid = numpy.empty((dim, 2), float)
             for d in range(dim):
                 new_grid[d, 0] = self.grid[d, 0]
                 new_grid[d, 1] = self.grid[d, self.ninc[d]]
             self.grid = numpy.asarray(new_grid)
-            self.inc = numpy.empty((dim, 1), FLOAT_TYPE)
+            self.inc = numpy.empty((dim, 1), float)
             self.ninc = numpy.array(dim * [1], dtype=numpy.intp)
             for d in range(dim):
                 self.inc[d, 0] = self.grid[d, 1] - self.grid[d, 0]
@@ -538,10 +530,10 @@ cdef class AdaptiveMap:
             return
 
         # smooth and regrid
-        new_grid = numpy.empty((dim, max(new_ninc) + 1), FLOAT_TYPE)
-        avg_f = numpy.ones(self.inc.shape[1], FLOAT_TYPE) # default = uniform
+        new_grid = numpy.empty((dim, max(new_ninc) + 1), float)
+        avg_f = numpy.ones(self.inc.shape[1], float) # default = uniform
         if alpha > 0 and max(self.ninc) > 1:
-            tmp_f = numpy.empty(self.inc.shape[1], FLOAT_TYPE)
+            tmp_f = numpy.empty(self.inc.shape[1], float)
         for d in range(dim):
             old_ninc = self.ninc[d]
             if alpha != 0 and old_ninc > 1:
@@ -607,7 +599,7 @@ cdef class AdaptiveMap:
         self.n_f = None
 
     def show_grid(self, ngrid=40, axes=None, shrink=False, plotter=None):
-        """ Display plots showing the current grid.
+        r""" Display plots showing the current grid.
 
         Args:
             ngrid (int): The number of grid nodes in each
@@ -734,7 +726,7 @@ cdef class AdaptiveMap:
         return plt
 
     def adapt_to_samples(self, x, f, nitn=5, alpha=1.0, nproc=1):
-        """ Adapt map to data ``{x, f(x)}``.
+        r""" Adapt map to data ``{x, f(x)}``.
 
         Replace grid with one that is optimized for integrating
         function ``f(x)``. New grid is found iteratively
@@ -774,7 +766,7 @@ cdef class AdaptiveMap:
             on the machine (equivalent to ``nproc=os.cpu_count()``).
             Default value is ``nproc=1``. (Requires Python 3.3 or later.)
         """
-        cdef numpy.npy_intp i, tmp_ninc, old_ninc
+        cdef Py_ssize_t i, tmp_ninc, old_ninc
         x = numpy.ascontiguousarray(x)
         if len(x.shape) != 2 or x.shape[1] != self.dim:
             raise ValueError('incompatible shape of x: {}'.format(x.shape))
@@ -840,7 +832,7 @@ cdef class AdaptiveMap:
         return (numpy.asarray(map.sum_f), numpy.asarray(map.n_f))
 
 cdef class Integrator(object):
-    """ Adaptive multidimensional Monte Carlo integration.
+    r""" Adaptive multidimensional Monte Carlo integration.
 
     :class:`vegas.Integrator` objects make Monte Carlo
     estimates of multidimensional functions ``f(x)``
@@ -898,7 +890,7 @@ cdef class Integrator(object):
 
     More generally, the integrator packages integration points in
    multidimensional arrays ``x[d1, d2..dn]`` when the integration
-    limits are specified by ``map[d1, d2...dn, i]` with ``i=0,1``.
+    limits are specified by ``map[d1, d2...dn, i]`` with ``i=0,1``.
     These arrays can have any shape.
 
     Alternatively, the integration region can be specified by a
@@ -1147,7 +1139,7 @@ cdef class Integrator(object):
             self.sum_sigf = numpy.sum(self.sigf)
             self.nstrat = numpy.array(map.nstrat)
         else:
-            self.sigf = numpy.array([], FLOAT_TYPE) # reset sigf (dummy)
+            self.sigf = numpy.array([], float) # reset sigf (dummy)
             self.sum_sigf = HUGE
         args = dict(Integrator.defaults)
         if 'map' in args:
@@ -1174,11 +1166,11 @@ cdef class Integrator(object):
             self.sigf_h5.close()
             os.unlink(fname)
             self.sigf_h5 = None
-            self.sigf = numpy.array([], FLOAT_TYPE) # reset sigf (dummy)
+            self.sigf = numpy.array([], float) # reset sigf (dummy)
             self.sum_sigf = HUGE
 
     def __reduce__(Integrator self not None):
-        """ Capture state for pickling. """
+        r""" Capture state for pickling. """
         odict = dict()
         for k in Integrator.defaults:
             if k in ['map']:
@@ -1189,11 +1181,11 @@ cdef class Integrator(object):
         return (Integrator, (self.map,), odict)
 
     def __setstate__(Integrator self not None, odict):
-        """ Set state for unpickling. """
+        r""" Set state for unpickling. """
         self.set(odict)
 
     def _set_map(self, map):
-        """ install new map, create xsample """
+        r""" install new map, create xsample """
         if isinstance(map, AdaptiveMap):
             self.map = AdaptiveMap(map)
             self.xsample = numpy.empty(self.map.dim, dtype=float)
@@ -1240,7 +1232,7 @@ cdef class Integrator(object):
 
 
     def set(Integrator self not None, ka={}, **kargs):
-        """ Reset default parameters in integrator.
+        r""" Reset default parameters in integrator.
 
         Usage is analogous to the constructor
         for |Integrator|: for example, ::
@@ -1376,7 +1368,7 @@ cdef class Integrator(object):
             # need to recalculate stratification distribution for beta>0
             # unless a new sigf was set
             old_val['sigf'] = self.sigf
-            self.sigf = numpy.array([], FLOAT_TYPE) # reset sigf (dummy)
+            self.sigf = numpy.array([], float) # reset sigf (dummy)
             self.sum_sigf = HUGE
         self.nstrat = nstrat
 
@@ -1399,11 +1391,11 @@ cdef class Integrator(object):
 
         # neval_batch = self.nhcube_batch * avg_neval_hcube
         nsigf = self.nhcube
-        if self.beta > 0 and self.nhcube > 1 and not self.adapt_to_errors and len(self.sigf) != nsigf:
+        if self.beta >= 0 and self.nhcube > 1 and not self.adapt_to_errors and len(self.sigf) != nsigf:
             # set up sigf
             self._clear_sigf_h5()
             if not self.minimize_mem:
-                self.sigf = numpy.ones(nsigf, FLOAT_TYPE)
+                self.sigf = numpy.ones(nsigf, float)
             else:
                 try:
                     import h5py
@@ -1415,14 +1407,14 @@ cdef class Integrator(object):
         self.sum_sigf = nsigf
         self.neval_hcube = numpy.empty(self.min_neval_batch // 2 + 1, dtype=numpy.intp)
         self.neval_hcube[:] = avg_neval_hcube
-        self.y = numpy.empty((self.min_neval_batch, self.dim), FLOAT_TYPE)
-        self.x = numpy.empty((self.min_neval_batch, self.dim), FLOAT_TYPE)
-        self.jac = numpy.empty(self.min_neval_batch, FLOAT_TYPE)
-        self.fdv2 = numpy.empty(self.min_neval_batch, FLOAT_TYPE)
+        self.y = numpy.empty((self.min_neval_batch, self.dim), float)
+        self.x = numpy.empty((self.min_neval_batch, self.dim), float)
+        self.jac = numpy.empty(self.min_neval_batch, float)
+        self.fdv2 = numpy.empty(self.min_neval_batch, float)
         return old_val
 
     def settings(Integrator self not None, ngrid=0):
-        """ Assemble summary of integrator settings into string.
+        r""" Assemble summary of integrator settings into string.
 
         Args:
             ngrid (int): Number of grid nodes in each direction
@@ -1431,7 +1423,7 @@ cdef class Integrator(object):
         Returns:
             String containing the settings.
         """
-        cdef numpy.npy_intp d
+        cdef Py_ssize_t d
         nhcube = numpy.prod(self.nstrat)
         neval = nhcube * self.min_neval_hcube if self.beta <= 0 else self.neval
         ans = "Integrator Settings:\n"
@@ -1574,9 +1566,9 @@ cdef class Integrator(object):
         Integrator self not None,
         bint yield_hcube=False,
         bint yield_y=False,
-        fcn = None,
+        # fcn = None,
         ):
-        """ Iterator over integration points and weights.
+        r""" Low-level batch iterator over integration points and weights.
 
         This method creates an iterator that returns integration
         points from |vegas|, and their corresponding weights in an
@@ -1602,24 +1594,41 @@ cdef class Integrator(object):
         corresponds to a single iteration. The number in a batch
         is controlled by parameter ``nhcube_batch``.
         """
-        cdef numpy.npy_intp nhcube = numpy.prod(self.nstrat)
+        for t in self._random_batch(yield_hcube, yield_y):
+            yield tuple(numpy.array(ti) for ti in t)
+
+    def _random_batch(
+        Integrator self not None,
+        bint yield_hcube=False,
+        bint yield_y=False,
+        # fcn = None,
+        ):
+        r""" Underlying implementation of generator :meth:`Integrator.random_batch`.
+
+        Only difference from ``random_batch()`` is that the values for
+        ``x``, ``y``, etc. are returned here as memoryviews into internal buffers
+        that are overwritten by subsequent iterations. ``random_batch()`` returns
+        copies of the views that are not overwritten. ``_random_batch()`` is used
+        internally to minimize memory and memory churn.
+        """
+        cdef Py_ssize_t nhcube = numpy.prod(self.nstrat)
         cdef double dv_y = 1. / nhcube
-        # cdef numpy.npy_intp min_neval_batch #= min(self.min_neval_batch, nhcube)
-        cdef numpy.npy_intp neval_batch # self.neval_batch
-        cdef numpy.npy_intp hcube_base
-        cdef numpy.npy_intp i_start, ihcube, i, d, tmp_hcube, hcube
-        cdef numpy.npy_intp[::1] hcube_array
+        # cdef Py_ssize_t min_neval_batch #= min(self.min_neval_batch, nhcube)
+        cdef Py_ssize_t neval_batch # self.neval_batch
+        cdef Py_ssize_t hcube_base
+        cdef Py_ssize_t i_start, ihcube, i, d, tmp_hcube, hcube
+        cdef Py_ssize_t[::1] hcube_array
         cdef double neval_sigf = (
             self.neval_frac * self.neval / self.sum_sigf
             if self.beta > 0 and self.sum_sigf > 0 and not self.adapt_to_errors
             else 0.0 # use min_neval_hcube (should not happen ever)
             )
-        cdef numpy.npy_intp avg_neval_hcube = int(self.neval / self.nhcube)
-        cdef numpy.npy_intp min_neval_batch = self.min_neval_batch # min_neval_batch * avg_neval_hcube ####
-        cdef numpy.npy_intp max_nhcube_batch = min_neval_batch // 2 + 1 ####
-        cdef numpy.npy_intp[::1] neval_hcube = self.neval_hcube
-        cdef numpy.npy_intp[::1] y0 = numpy.empty(self.dim, numpy.intp)
-        cdef numpy.npy_intp max_neval_hcube = max(
+        cdef Py_ssize_t avg_neval_hcube = int(self.neval / self.nhcube)
+        cdef Py_ssize_t min_neval_batch = self.min_neval_batch # min_neval_batch * avg_neval_hcube ####
+        cdef Py_ssize_t max_nhcube_batch = min_neval_batch // 2 + 1 ####
+        cdef Py_ssize_t[::1] neval_hcube = self.neval_hcube
+        cdef Py_ssize_t[::1] y0 = numpy.empty(self.dim, numpy.intp)
+        cdef Py_ssize_t max_neval_hcube = max(
             self.max_neval_hcube, self.min_neval_hcube
             )
         cdef double[::1] sigf
@@ -1637,12 +1646,10 @@ cdef class Integrator(object):
         self.neval_hcube_range = numpy.zeros(2, numpy.intp) + self.min_neval_hcube
         if yield_hcube:
             hcube_array = numpy.empty(self.y.shape[0], numpy.intp)
-        if adaptive_strat and self.minimize_mem and not self.adapt:
-            # can't minimize_mem without also adapting, so force beta=0
-            neval_sigf = 0.0
-        # for hcube_base in range(0, nhcube, min_neval_batch):
-        #     if (hcube_base + min_neval_batch) > nhcube:
-        #         min_neval_batch = nhcube - hcube_base
+        # if adaptive_strat and self.minimize_mem and not self.adapt:
+        ##### believe this was wrong idea; want to preserve adaptive strat if it exists
+        #     # can't minimize_mem without also adapting, so force beta=0
+        #     neval_sigf = 0.0
         neval_batch = 0
         hcube_base = 0
         sigf = self.sigf[hcube_base:hcube_base + max_nhcube_batch]
@@ -1674,10 +1681,10 @@ cdef class Integrator(object):
 
             # 1) resize work arrays if needed (to double what is needed)
             if neval_batch > self.y.shape[0]:
-                self.y = numpy.empty((2 * neval_batch, self.dim), FLOAT_TYPE)
-                self.x = numpy.empty((2 * neval_batch, self.dim), FLOAT_TYPE)
-                self.jac = numpy.empty(2 * neval_batch, FLOAT_TYPE)
-                self.fdv2 = numpy.empty(2 * neval_batch, FLOAT_TYPE)
+                self.y = numpy.empty((2 * neval_batch, self.dim), float)
+                self.x = numpy.empty((2 * neval_batch, self.dim), float)
+                self.jac = numpy.empty(2 * neval_batch, float)
+                self.fdv2 = numpy.empty(2 * neval_batch, float)
             y = self.y
             x = self.x
             jac = self.jac
@@ -1694,7 +1701,7 @@ cdef class Integrator(object):
                     tmp_hcube = (tmp_hcube - y0[d]) // self.nstrat[d]
             for d in range(self.dim):
                 for i in range(i_start, i_start + neval_hcube[ihcube]):
-                    y[i, d] = (y0[d] + yran[i, d]) / self.nstrat[d]
+                    y[i, d] = (y0[d] + yran[i, d]) / self.nstrat[d]
                 i_start += neval_hcube[ihcube]
             self.map.map(y, x, jac, neval_batch)
 
@@ -1706,12 +1713,12 @@ cdef class Integrator(object):
                     if yield_hcube:
                         hcube_array[i] = hcube_base + ihcube
                 i_start += neval_hcube[ihcube]
-            answer = (numpy.asarray(x[:neval_batch, :]),)
+            answer = (x[:neval_batch, :],)
             if yield_y:
-                answer += (numpy.asarray(y[:neval_batch, :]),)
-            answer += (numpy.asarray(jac[:neval_batch]),)
+                answer += (y[:neval_batch, :],)
+            answer += (jac[:neval_batch],)
             if yield_hcube:
-                answer += (numpy.asarray(hcube_array[:neval_batch]),)
+                answer += (hcube_array[:neval_batch],)
             yield answer
 
         # reset parameters for main loop
@@ -1726,7 +1733,7 @@ cdef class Integrator(object):
     def random(
         Integrator self not None, bint yield_hcube=False, bint yield_y=False
         ):
-        """ Iterator over integration points and weights.
+        r""" Low-level iterator over integration points and weights.
 
         This method creates an iterator that returns integration
         points from |vegas|, and their corresponding weights in an
@@ -1749,9 +1756,9 @@ cdef class Integrator(object):
         """
         cdef double[:, ::1] x
         cdef double[::1] wgt
-        cdef numpy.npy_intp[::1] hcube
+        cdef Py_ssize_t[::1] hcube
         cdef double[:, ::1] y
-        cdef numpy.npy_intp i
+        cdef Py_ssize_t i
         if yield_hcube and yield_y:
             for x, y, wgt, hcube in self.random_batch(yield_hcube=True, yield_y=True):
                 for i in range(x.shape[0]):
@@ -1769,6 +1776,56 @@ cdef class Integrator(object):
             for i in range(x.shape[0]):
                 yield (x[i], wgt[i])
 
+    def sample(self, nbatch=None, mode='rbatch'):
+        r""" Generate random sample of integration weights and points.
+
+        Given a :class:`vegas.Integrator` called ``integ``, the code ::
+
+            wgt, x = integ.sample(mode='lbatch')
+
+        generates a random array of integration points ``x`` and the
+        array of corresponding weights ``w`` such that ::
+
+            r = sum(wgt * f(x))
+
+        is an estimate of the integral of ``lbatch`` integrand ``f(x)``.
+        Setting parameter ``mode='rbatch'`` formats ``x`` for use
+        in ``rbatch`` integrands.
+
+        Parameter ``nbatch`` specifies the minimum number of integration
+        points in the sample. The actual number is the smallest integer
+        multiple of ``integ.last_neval`` that is equal to or larger than
+        ``nbatch``.
+        """
+        neval = self.last_neval if self.last_neval > 0 else self.neval
+        nbatch = neval if nbatch is None else int(nbatch)
+        nit = nbatch // neval
+        if nit * neval < nbatch:
+            nit += 1
+        samples = []
+        wgts = []
+        for _ in range(nit):
+            for x, w in self.random_batch():
+                samples.append(numpy.array(x))
+                wgts.append(numpy.array(w))
+        samples = numpy.concatenate(samples, axis=0)
+        wgts = numpy.concatenate(wgts) / nit
+        # need to fix following to allow other formats for x
+        if self.xsample.shape is None:
+            if mode == 'rbatch':
+                samples = gvar.BufferDict(self.xsample, rbatch_buf=samples.T)
+            else:
+                samples = gvar.BufferDict(self.xsample, lbatch_buf=samples)
+        else:
+            if self.xsample.shape != ():
+                if mode == 'rbatch':
+                    samples = samples.T
+                    samples.shape = self.xsample.shape + (-1,)
+                else:
+                    samples.shape = (-1,) + self.xsample.shape
+        return wgts, samples
+
+
     @staticmethod
     def synchronize_random():
         try:
@@ -1791,16 +1848,16 @@ cdef class Integrator(object):
         gvar.ranseed(seed)
 
     def _make_std_integrand(self, fcn, xsample=None):
-        """ Convert integrand ``fcn`` into an lbatch integrand.
+        r""" Convert integrand ``fcn`` into an lbatch integrand.
 
-        Returns an object of ``vi`` of type :class:`VegasIntegrand`.
+        Returns an object ``vi`` of type :class:`VegasIntegrand`.
         This object converts an arbitrary integrand ``fcn`` (``lbatch`, `rbatch`,
         and non-batch, with or without dictionaries for input or output)
         into a standard form: an lbatch integrand whose output is a
         2-d lbatch array.
 
         This is useful when building integrands that call other
-        functions of the parameters. The latter are converted
+        functions of the parameters. The latter are converted to
         lbatch integrands irrespective of what they were
        originally. This standardizes them, making it straightforward
        to build them into a new integrand.
@@ -1816,7 +1873,7 @@ cdef class Integrator(object):
             )
 
     def __call__(Integrator self not None, fcn, save=None, saveall=None, **kargs):
-        """ Integrate integrand ``fcn``.
+        r""" Integrate integrand ``fcn``.
 
         A typical integrand has the form, for example::
 
@@ -1924,10 +1981,11 @@ cdef class Integrator(object):
         an object of type :class:`vegas.RAvg`,
         :class:`vegas.RAvgArray`, or :class:`vegas.RAvgDict`.
         """
-        cdef numpy.ndarray[numpy.double_t, ndim=2] x
-        cdef numpy.ndarray[numpy.double_t, ndim=2] jac
-        cdef numpy.ndarray[numpy.double_t, ndim=1] wgt
-        cdef numpy.ndarray[numpy.npy_intp, ndim=1] hcube
+        cdef double[:, ::1] x
+        # cdef double[:, ::1] jac
+        cdef double[::1] wgt
+        cdef Py_ssize_t[::1] hcube
+
         cdef double[::1] sigf
         cdef double[:, ::1] y
         cdef double[::1] fdv2
@@ -1936,9 +1994,9 @@ cdef class Integrator(object):
         cdef double[::1] sum_wf
         cdef double[::1] sum_dwf
         cdef double[:, ::1] sum_dwf2
-        cdef double[::1] mean = numpy.empty(1, FLOAT_TYPE)
-        cdef double[:, ::1] var = numpy.empty((1, 1), FLOAT_TYPE)
-        cdef numpy.npy_intp itn, i, j, jtmp, s, t, neval, fcn_size, len_hcube
+        cdef double[::1] mean = numpy.empty(1, float)
+        cdef double[:, ::1] var = numpy.empty((1, 1), float)
+        cdef Py_ssize_t itn, i, j, jtmp, s, t, neval, fcn_size, len_hcube
         cdef bint adaptive_strat
         cdef double sum_sigf, sigf2
         cdef bint firsteval = True
@@ -1968,12 +2026,12 @@ cdef class Integrator(object):
         fcn_size = fcn.size
 
         # allocate work arrays
-        dwf = numpy.empty(fcn_size, FLOAT_TYPE)
-        sum_wf = numpy.empty(fcn_size, FLOAT_TYPE)
-        sum_dwf = numpy.empty(fcn_size, FLOAT_TYPE)
-        sum_dwf2 = numpy.empty((fcn_size, fcn_size), FLOAT_TYPE)
-        mean = numpy.empty(fcn_size, FLOAT_TYPE)
-        var = numpy.empty((fcn_size, fcn_size), FLOAT_TYPE)
+        dwf = numpy.empty(fcn_size, float)
+        sum_wf = numpy.empty(fcn_size, float)
+        sum_dwf = numpy.empty(fcn_size, float)
+        sum_dwf2 = numpy.empty((fcn_size, fcn_size), float)
+        mean = numpy.empty(fcn_size, float)
+        var = numpy.empty((fcn_size, fcn_size), float)
         mean[:] = 0.0
         var[:, :] = 0.0
         result = VegasResult(fcn, weighted=self.adapt)
@@ -1988,32 +2046,36 @@ cdef class Integrator(object):
         sum_sigf = 0.0
 
         # iterate batch-slices of integration points
-        for x, y, wgt, hcube in self.random_batch(
-            yield_hcube=True, yield_y=True, fcn=fcn
+        for x, y, wgt, hcube in self._random_batch(
+            yield_hcube=True, yield_y=True, #fcn=fcn
             ):
             fdv2 = self.fdv2 # must be inside loop
             len_hcube = len(hcube)
 
             # evaluate integrand at all points in x
+            xa = numpy.asarray(x)
             if self.nproc > 1:
                 nx = x.shape[0] // self.nproc + 1
                 if self.uses_jac:
-                    jac = self.map.jac1d(y)
+                    jac1d = self.map.jac1d(y)
                     results = self.pool.starmap(
                         fcn.eval,
-                        [(x[i*nx : (i+1)*nx], jac[i*nx : (i+1)*nx]) for i in range(self.nproc) if i*nx < x.shape[0]],
+                        [(xa[i*nx : (i+1)*nx], jac1d[i*nx : (i+1)*nx]) for i in range(self.nproc) if i*nx < xa.shape[0]],
                         1,
                         )
                 else:
                     results = self.pool.starmap(
                         fcn.eval,
-                        [(x[i*nx : (i+1)*nx], None) for i in range(self.nproc) if i*nx < x.shape[0]],
+                        [(xa[i*nx : (i+1)*nx], None) for i in range(self.nproc) if i*nx < xa.shape[0]],
                         1,
                         )
-                fx = numpy.concatenate(results, axis=0)
-            else:
-                fx = fcn.eval(x, jac=self.map.jac1d(y) if self.uses_jac else None)
-
+                fx = numpy.concatenate(results, axis=0, dtype=float)
+            else:
+                # fx = fcn.eval(x, jac=self.map.jac1d(y) if self.uses_jac else None)
+                fx = numpy.asarray(
+                    fcn.eval(xa, jac=self.map.jac1d(y) if self.uses_jac else None),
+                    dtype=float
+                    )
             # sanity check
             if numpy.any(numpy.isnan(fx)):
                 raise ValueError('integrand evaluates to nan')
@@ -2102,7 +2164,7 @@ cdef class Integrator(object):
         return result.result
 
 class reporter:
-    """ Analyzer class that prints out a report, iteration
+    r""" Analyzer class that prints out a report, iteration
     by interation, on how vegas is doing. Parameter ngrid
     specifies how many x[i]'s to print out from the maps
     for each axis.
@@ -2146,7 +2208,7 @@ class reporter:
 # average of the results of all iterations (unless parameter weigthed=False,
 # in which case the average is unweighted).
 class RAvg(gvar.GVar):
-    """ Running average of scalar-valued Monte Carlo estimates.
+    r""" Running average of scalar-valued Monte Carlo estimates.
 
     This class accumulates independent Monte Carlo
    estimates (e.g., of an integral) and combines
@@ -2184,7 +2246,7 @@ class RAvg(gvar.GVar):
         self.sum_neval = sum_neval
 
     def extend(self, ravg):
-        """ Merge results from :class:`RAvg` object ``ravg`` after results currently in ``self``. """
+        r""" Merge results from :class:`RAvg` object ``ravg`` after results currently in ``self``. """
         for r in ravg.itn_results:
             self.add(r)
         self.sum_neval += ravg.sum_neval
@@ -2262,7 +2324,7 @@ class RAvg(gvar.GVar):
         return self.sdev < atol + rtol * abs(self.mean)
 
     def add(self, g):
-        """ Add estimate ``g`` to the running average. """
+        r""" Add estimate ``g`` to the running average. """
         self.itn_results.append(g)
         if isinstance(g, gvar.GVarRef):
             return
@@ -2282,7 +2344,7 @@ class RAvg(gvar.GVar):
         super(RAvg, self).__init__(*gvar.gvar(mean, numpy.sqrt(var)).internaldata)
 
     def summary(self, extended=False, weighted=None):
-        """ Assemble summary of results, iteration-by-iteration, into a string.
+        r""" Assemble summary of results, iteration-by-iteration, into a string.
 
         Args:
             weighted (bool): Display weighted averages of results from different
@@ -2323,7 +2385,7 @@ class RAvg(gvar.GVar):
         return ans
 
 class RAvgDict(gvar.BufferDict):
-    """ Running average of dictionary-valued Monte Carlo estimates.
+    r""" Running average of dictionary-valued Monte Carlo estimates.
 
     This class accumulates independent dictionaries of Monte Carlo
     estimates (e.g., of an integral) and combines
@@ -2351,7 +2413,7 @@ class RAvgDict(gvar.BufferDict):
         self.sum_neval = sum_neval
 
     def extend(self, ravg):
-        """ Merge results from :class:`RAvgDict` object ``ravg`` after results currently in ``self``. """
+        r""" Merge results from :class:`RAvgDict` object ``ravg`` after results currently in ``self``. """
         for r in ravg.itn_results:
             self.add(r)
         self.sum_neval += ravg.sum_neval
@@ -2400,7 +2462,7 @@ class RAvgDict(gvar.BufferDict):
         self.rarray.add(newg.buf)
 
     def summary(self, extended=False, weighted=None, rescale=None):
-        """ Assemble summary of results, iteration-by-iteration, into a string.
+        r""" Assemble summary of results, iteration-by-iteration, into a string.
 
         Args:
             extended (bool): Include a table of final averages for every
@@ -2449,7 +2511,7 @@ class RAvgDict(gvar.BufferDict):
     rescale = property(_get_rescale, None, None, "Integrals divided by ``rescale`` before doing weighted averages.")
 
 class RAvgArray(numpy.ndarray):
-    """ Running average of array-valued Monte Carlo estimates.
+    r""" Running average of array-valued Monte Carlo estimates.
 
     This class accumulates independent arrays of Monte Carlo
    estimates (e.g., of an integral) and combines
@@ -2579,7 +2641,7 @@ class RAvgArray(numpy.ndarray):
             self.add(r)
 
     def extend(self, ravg):
-        """ Merge results from :class:`RAvgArray` object ``ravg`` after results currently in ``self``. """
+        r""" Merge results from :class:`RAvgArray` object ``ravg`` after results currently in ``self``. """
         for r in ravg.itn_results:
             self.add(r)
         self.sum_neval += ravg.sum_neval
@@ -2654,7 +2716,7 @@ class RAvgArray(numpy.ndarray):
     avg_neval = property(_avg_neval, None, None, "Average number of integrand evaluations per iteration.")
 
     def add(self, g):
-        """ Add estimate ``g`` to the running average. """
+        r""" Add estimate ``g`` to the running average. """
         g = numpy.asarray(g)
         self.itn_results.append(g)
         if g.size > 1 and isinstance(g.flat[0], gvar.GVarRef):
@@ -2703,7 +2765,7 @@ class RAvgArray(numpy.ndarray):
         self[:] = gvar.gvar(mean, cov).reshape(self.shape)
 
    def summary(self, extended=False, weighted=None, rescale=None):
-        """ Assemble summary of results, iteration-by-iteration, into a string.
+        r""" Assemble summary of results, iteration-by-iteration, into a string.
 
         Args:
             extended (bool): Include a table of final averages for every
@@ -2821,7 +2883,7 @@ cdef class VegasResult:
         self.result.sum_neval = self.sum_neval
 
     def update_analyzer(self, analyzer):
-        """ Update analyzer at end of an iteration. """
+        r""" Update analyzer at end of an iteration. """
         analyzer.end(self.result.itn_results[-1], self.result)
 
     def converged(self, rtol, atol):
@@ -2831,7 +2893,7 @@ cdef class VegasResult:
 cdef class VegasIntegrand:
     cdef public object shape
     cdef public object fcntype
-    cdef public numpy.npy_intp size
+    cdef public Py_ssize_t size
     cdef public object eval
    cdef public object bdict
    cdef public int mpi_nproc # number of MPI processors
@@ -2963,14 +3025,14 @@ cdef class VegasIntegrand:
             nx = x.shape[0] // self.mpi_nproc + 1
             i0 = self.rank * nx
             i1 = min(i0 + nx, x.shape[0])
-            f = numpy.empty((nx, self.size), FLOAT_TYPE)
+            f = numpy.empty((nx, self.size), float)
             if i1 > i0:
                 # fill f so long as haven't gone off end
                 if jac is None:
                     f[:(i1-i0)] = _eval(x[i0:i1], jac=None)
                 else:
                     f[:(i1-i0)] = _eval(x[i0:i1], jac=jac[i0:i1])
-            results = numpy.empty((self.mpi_nproc * nx, self.size), FLOAT_TYPE)
+            results = numpy.empty((self.mpi_nproc * nx, self.size), float)
             self.comm.Allgather(f, results)
             return results[:x.shape[0]]
         self.eval = _mpi_eval
@@ -2986,7 +3048,7 @@ cdef class VegasIntegrand:
         self.eval = gvar.distribute_gvars(self.eval, gvlist)
 
     def __call__(self, x, jac=None):
-        """ Non-batch version of fcn """
+        r""" Non-batch version of fcn """
         # repack x as lbatch array and evaluate function via eval
         if hasattr(x, 'keys'):
             x = gvar.asbufferdict(x)
@@ -2997,7 +3059,7 @@ cdef class VegasIntegrand:
         return self.format_result(fx)
 
     def format_result(self, mean, var=None):
-        """ Reformat output from integrator to correspond to original output format """
+        r""" Reformat output from integrator to correspond to original output format """
         if var is None:
             # mean is an ndarray
             if self.shape is None:
@@ -3016,7 +3078,7 @@ cdef class VegasIntegrand:
             return gvar.gvar(mean, var).reshape(self.shape)
 
    def format_evalx(self, evalx):
-        """ Reformat output from eval(x).
+        r""" Reformat output from eval(x).
 
         ``self.eval(x)`` returns an array ``evalx[i,d]`` where ``i`` is the batch index and ``d``
         labels different components of the ``self.fcn`` output. ``self.format_evalx(evalx)``
@@ -3029,8 +3091,8 @@ cdef class VegasIntegrand:
         return evalx.reshape(evalx.shape[:1] + self.shape)
 
     def training(self, x, jac):
-        """ Calculate first element of integrand at point ``x``. """
-        cdef numpy.ndarray fx =self.eval(x, jac=jac)
+        r""" Calculate first element of integrand at point ``x``. """
+        fx =self.eval(x, jac=jac)
         if fx.ndim == 1:
             return fx
         else:
@@ -3066,8 +3128,9 @@ cdef class _BatchIntegrand_from_Base(object):
     def _distribute_gvars(self, gvlist):
         self.fcn = gvar.distribute_gvars(self.fcn, gvlist)
 
-    def non_std_arg_fcn(self, numpy.ndarray[numpy.double_t, ndim=1] x, jac=None):
+    def non_std_arg_fcn(self, x, jac=None):
         " fcn(x) for non-standard non-batch functions "
+        x = numpy.asarray(x)
         if self.dict_arg:
             xd = gvar.BufferDict(self.xsample, buf=x)
             if jac is not None:
@@ -3080,8 +3143,9 @@ cdef class _BatchIntegrand_from_Base(object):
         else:
             return self.fcn(x.reshape(self.xsample.shape))
 
-    def non_std_arg_batch_fcn(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
+    def non_std_arg_batch_fcn(self, x, jac=None):
         " fcn(x) for non-standard batch functions "
+        x = numpy.asarray(x)
         if self.dict_arg:
             if self.rbatch:
                 xd = gvar.BufferDict(self.xsample, rbatch_buf=x.T)
@@ -3101,7 +3165,7 @@ cdef class _BatchIntegrand_from_Base(object):
             return self.fcn(x.reshape(sh)) if jac is None else self.fcn(x.reshape(sh), jac=jac.reshape(sh))
 
 cdef class _BatchIntegrand_from_NonBatch(_BatchIntegrand_from_Base):
-    cdef readonly numpy.npy_intp size
+    cdef readonly Py_ssize_t size
    cdef readonly object shape
    """ Batch integrand from non-batch integrand. """
@@ -3110,43 +3174,50 @@ cdef class _BatchIntegrand_from_NonBatch(_BatchIntegrand_from_Base):
         self.shape = shape
         super(_BatchIntegrand_from_NonBatch, self).__init__(fcn, xsample)
 
-    def __call__(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
-        cdef numpy.npy_intp i
-        cdef numpy.ndarray[numpy.float_t, ndim=2] f = numpy.empty(
-            (x.shape[0], self.size), FLOAT_TYPE
+    def __call__(self, double[:, :] x, jac=None):
+        cdef Py_ssize_t i, j
+        cdef double[:, ::1] f
+        cdef const double[:] fx
+        _f = numpy.empty(
+            (x.shape[0], self.size), float
             )
+        f = _f
         if self.shape == ():
             # very common special case
             for i in range(x.shape[0]):
-                if self.std_arg:
-                    f[i] = self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
-                else:
-                    f[i] = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
+                if self.std_arg:
+                    f[i, 0] = self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
+                else:
+                    f[i, 0] = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
         else:
            for i in range(x.shape[0]):
                 if self.std_arg:
-                    f[i] = numpy.asarray(
+                    fx = numpy.asarray(
                         self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
                         ).reshape((-1,))
                 else:
-                    f[i] = numpy.asarray(
+                    fx = numpy.asarray(
                         self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
                         ).reshape((-1,))
-        return f
+                for j in range(len(fx)):
+                    f[i, j] = fx[j]
+        return _f
 
 cdef class _BatchIntegrand_from_NonBatchDict(_BatchIntegrand_from_Base):
-    cdef readonly numpy.npy_intp size
+    cdef readonly Py_ssize_t size
    """ Batch integrand from non-batch dict-integrand. """
 
    def __init__(self, fcn, size, xsample=None):
        self.size = size
        super(_BatchIntegrand_from_NonBatchDict, self).__init__(fcn, xsample)
 
-    def __call__(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
-        cdef numpy.npy_intp i
-        cdef numpy.ndarray[numpy.double_t, ndim=2] f = numpy.empty(
+    def __call__(self, double[:, :] x, jac=None):
+        cdef Py_ssize_t i, j
+        cdef double[:, ::1] f
+        _f = numpy.empty(
             (x.shape[0], self.size), float
             )
+        f = _f
         for i in range(x.shape[0]):
             if self.std_arg:
                 fx = self.fcn(x[i]) if jac is None else self.fcn(x[i], jac=jac[i])
@@ -3154,8 +3225,9 @@ cdef class _BatchIntegrand_from_NonBatchDict(_BatchIntegrand_from_Base):
                 fx = self.non_std_arg_fcn(x[i], None if jac is None else jac[i])
             if not isinstance(fx, gvar.BufferDict):
                 fx = gvar.BufferDict(fx)
-            f[i] = fx.buf
-        return f
+            for j in range(f.shape[1]):
+                f[i, j] = fx.buf[j]
+        return _f
 
 cdef class _BatchIntegrand_from_Batch(_BatchIntegrand_from_Base):
     cdef readonly object shape
@@ -3166,7 +3238,7 @@ cdef class _BatchIntegrand_from_Batch(_BatchIntegrand_from_Base):
         self.rbatch = rbatch
         super(_BatchIntegrand_from_Batch, self).__init__(fcn, xsample)
 
-    def __call__(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
+    def __call__(self, x, jac=None):
         # call fcn(x)
         if self.std_arg:
             if self.rbatch:
@@ -3191,7 +3263,7 @@ cdef class _BatchIntegrand_from_Batch(_BatchIntegrand_from_Base):
 
 
 cdef class _BatchIntegrand_from_BatchDict(_BatchIntegrand_from_Base):
-    cdef readonly numpy.npy_intp size
+    cdef readonly Py_ssize_t size
    cdef readonly object slice
    cdef readonly object shape
    cdef readonly bint rbatch
@@ -3206,11 +3278,13 @@ cdef class _BatchIntegrand_from_BatchDict(_BatchIntegrand_from_Base):
             self.slice[k], self.shape[k] = bdict.slice_shape(k)
         super(_BatchIntegrand_from_BatchDict, self).__init__(fcn, xsample)
 
-    def __call__(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
-        cdef numpy.npy_intp i
-        cdef numpy.ndarray[numpy.double_t, ndim=2] buf = numpy.empty(
+    def __call__(self, x, jac=None):
+        cdef Py_ssize_t i
+        # cdef double[:, ::1] buf
+        buf = numpy.empty(
             (x.shape[0], self.size), float
             )
+        # buf = _buf
         # call fcn(x)
         if self.std_arg:
             if self.rbatch:
@@ -3241,7 +3315,7 @@
 
 # LBatchIntegrand and RBatchIntegrand are container classes for batch integrands.
 cdef class LBatchIntegrand(object):
-    """ Wrapper for lbatch integrands.
+    r""" Wrapper for lbatch integrands.
 
     Used by :func:`vegas.lbatchintegrand`.
 
@@ -3263,7 +3337,7 @@ cdef class LBatchIntegrand(object):
         return getattr(self.fcn, attr)
 
 def lbatchintegrand(f):
-    """ Decorator for batch integrand functions.
+    r""" Decorator for batch integrand functions.
 
     Applying :func:`vegas.lbatchintegrand` to a function ``fcn`` repackages
     the function in a format that |vegas| can understand. Appropriate
@@ -3295,7 +3369,7 @@ def lbatchintegrand(f):
     return LBatchIntegrand(f)
 
 cdef class RBatchIntegrand(object):
-    """ Same as :class:`vegas.LBatchIntegrand` but with batch indices on the right (not left). """
+    r""" Same as :class:`vegas.LBatchIntegrand` but with batch indices on the right (not left). """
     # cdef public object fcn
    def __init__(self, fcn=None):
        self.fcn = self if fcn is None else fcn
@@ -3312,7 +3386,7 @@ cdef class RBatchIntegrand(object):
 
 
 def rbatchintegrand(f):
-    """ Same as :func:`vegas.lbatchintegrand` but with batch indices on the right (not left). """
+    r""" Same as :func:`vegas.lbatchintegrand` but with batch indices on the right (not left). """
    try:
        f.fcntype = 'rbatch'
        return f
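
Usage note: the most visible user-facing addition in 6.2.1 is the new ``Integrator.sample()`` method shown in the diff above. The following is a minimal sketch of how it can be exercised, based only on its docstring; the integrand, the integration limits, and the parameter values (``nitn=5``, ``neval=1000``, ``nbatch=2000``) are illustrative assumptions, not taken from the package.

    import numpy as np
    import vegas

    def f(x):
        # simple 2-d integrand, evaluated one point at a time
        return np.exp(-np.sum(x ** 2))

    integ = vegas.Integrator([[0., 1.], [0., 1.]])
    integ(f, nitn=5, neval=1000)    # adapt the grid first

    # draw at least 2000 weighted points; per the docstring,
    # sum(wgt * f(x)) estimates the integral of f
    wgt, x = integ.sample(nbatch=2000, mode='lbatch')
    x = np.asarray(x)               # lbatch mode: x[i, d], wgt[i]
    est = np.sum(wgt * np.exp(-np.sum(x ** 2, axis=1)))
    print(est)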