vegas-6.1.2-cp312-cp312-macosx_11_0_arm64.whl → vegas-6.2-cp312-cp312-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Binary file changed (diff not shown).
vegas/_vegas.pyx CHANGED
@@ -34,6 +34,11 @@ import warnings
  import numpy
  import gvar
 
+ if numpy.version.version >= '2.0':
+     FLOAT_TYPE = numpy.float64
+ else:
+     FLOAT_TYPE = numpy.float_
+
  cdef double TINY = 10 ** (sys.float_info.min_10_exp + 50) # smallest and biggest
  cdef double HUGE = 10 ** (sys.float_info.max_10_exp - 50) # with extra headroom
  cdef double EPSILON = sys.float_info.epsilon * 1e4 # roundoff error threshold (see Schubert and Gertz Table 2)
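
The block added above defines FLOAT_TYPE once at import time so that the rest of the module no longer references numpy.float_, an alias that NumPy 2.0 removed. A minimal standalone sketch of the same compatibility pattern, independent of vegas:

    import numpy

    # numpy.float_ was removed in NumPy 2.0; numpy.float64 exists on both 1.x and 2.x,
    # so pick the alias once and use FLOAT_TYPE wherever a float dtype is needed.
    if numpy.version.version >= '2.0':
        FLOAT_TYPE = numpy.float64
    else:
        FLOAT_TYPE = numpy.float_

    x = numpy.zeros(3, dtype=FLOAT_TYPE)
    print(x.dtype)   # float64 under either NumPy version
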
@@ -230,8 +235,8 @@ cdef class AdaptiveMap:
  "no of increments < 1 in AdaptiveMap -- %s"
  % str(ninc)
  )
- new_inc = numpy.empty((dim, max(ninc)), numpy.float_)
- new_grid = numpy.empty((dim, new_inc.shape[1] + 1), numpy.float_)
+ new_inc = numpy.empty((dim, max(ninc)), FLOAT_TYPE)
+ new_grid = numpy.empty((dim, new_inc.shape[1] + 1), FLOAT_TYPE)
  for d in range(dim):
  tmp = numpy.linspace(self.grid[d, 0], self.grid[d, self.ninc[d]], ninc[d] + 1)
  for i in range(ninc[d] + 1):
@@ -256,11 +261,11 @@ cdef class AdaptiveMap:
  if y is None:
  y = gvar.RNG.random(size=self.dim)
  else:
- y = numpy.asarray(y, numpy.float_)
+ y = numpy.asarray(y, FLOAT_TYPE)
  y_shape = y.shape
  y.shape = -1, y.shape[-1]
  x = 0 * y
- jac = numpy.empty(y.shape[0], numpy.float_)
+ jac = numpy.empty(y.shape[0], FLOAT_TYPE)
  self.map(y, x, jac)
  x.shape = y_shape
  return x
@@ -282,7 +287,7 @@ cdef class AdaptiveMap:
  y_shape = y.shape
  y.shape = -1, y.shape[-1]
  ny = y.shape[0]
- jac = numpy.empty(y.shape, numpy.float_)
+ jac = numpy.empty(y.shape, FLOAT_TYPE)
  for i in range(ny):
  for d in range(dim):
  ninc = self.ninc[d]
@@ -451,8 +456,8 @@ cdef class AdaptiveMap:
  cdef numpy.npy_intp i, d
  if self.sum_f is None:
  shape = (self.inc.shape[0], self.inc.shape[1])
- self.sum_f = numpy.zeros(shape, numpy.float_)
- self.n_f = numpy.zeros(shape, numpy.float_) + TINY
+ self.sum_f = numpy.zeros(shape, FLOAT_TYPE)
+ self.n_f = numpy.zeros(shape, FLOAT_TYPE) + TINY
  if ny < 0:
  ny = y.shape[0]
  elif ny > y.shape[0]:
@@ -520,12 +525,12 @@ cdef class AdaptiveMap:
  if min(new_ninc) < 1:
  raise ValueError('ninc < 1: ' + str(list(new_ninc)))
  if max(new_ninc) == 1:
- new_grid = numpy.empty((dim, 2), numpy.float_)
+ new_grid = numpy.empty((dim, 2), FLOAT_TYPE)
  for d in range(dim):
  new_grid[d, 0] = self.grid[d, 0]
  new_grid[d, 1] = self.grid[d, self.ninc[d]]
  self.grid = numpy.asarray(new_grid)
- self.inc = numpy.empty((dim, 1), numpy.float_)
+ self.inc = numpy.empty((dim, 1), FLOAT_TYPE)
  self.ninc = numpy.array(dim * [1], dtype=numpy.intp)
  for d in range(dim):
  self.inc[d, 0] = self.grid[d, 1] - self.grid[d, 0]
@@ -533,10 +538,10 @@ cdef class AdaptiveMap:
  return
 
  # smooth and regrid
- new_grid = numpy.empty((dim, max(new_ninc) + 1), numpy.float_)
- avg_f = numpy.ones(self.inc.shape[1], numpy.float_) # default = uniform
+ new_grid = numpy.empty((dim, max(new_ninc) + 1), FLOAT_TYPE)
+ avg_f = numpy.ones(self.inc.shape[1], FLOAT_TYPE) # default = uniform
  if alpha > 0 and max(self.ninc) > 1:
- tmp_f = numpy.empty(self.inc.shape[1], numpy.float_)
+ tmp_f = numpy.empty(self.inc.shape[1], FLOAT_TYPE)
  for d in range(dim):
  old_ninc = self.ninc[d]
  if alpha != 0 and old_ninc > 1:
@@ -893,7 +898,7 @@ cdef class Integrator(object):
 
  More generally, the integrator packages integration points in
  multidimensional arrays ``x[d1, d2..dn]`` when the integration
- limits are specified by ``map[d1, d2...dn, i]` with ``i=0,1``.
+ limits are specified by ``map[d1, d2...dn, i]`` with ``i=0,1``.
  These arrays can have any shape.
 
  Alternatively, the integration region can be specified by a
@@ -1142,7 +1147,7 @@ cdef class Integrator(object):
  self.sum_sigf = numpy.sum(self.sigf)
  self.nstrat = numpy.array(map.nstrat)
  else:
- self.sigf = numpy.array([], numpy.float_) # reset sigf (dummy)
+ self.sigf = numpy.array([], FLOAT_TYPE) # reset sigf (dummy)
  self.sum_sigf = HUGE
  args = dict(Integrator.defaults)
  if 'map' in args:
@@ -1169,7 +1174,7 @@ cdef class Integrator(object):
  self.sigf_h5.close()
  os.unlink(fname)
  self.sigf_h5 = None
- self.sigf = numpy.array([], numpy.float_) # reset sigf (dummy)
+ self.sigf = numpy.array([], FLOAT_TYPE) # reset sigf (dummy)
  self.sum_sigf = HUGE
 
  def __reduce__(Integrator self not None):
@@ -1371,7 +1376,7 @@ cdef class Integrator(object):
  # need to recalculate stratification distribution for beta>0
  # unless a new sigf was set
  old_val['sigf'] = self.sigf
- self.sigf = numpy.array([], numpy.float_) # reset sigf (dummy)
+ self.sigf = numpy.array([], FLOAT_TYPE) # reset sigf (dummy)
  self.sum_sigf = HUGE
  self.nstrat = nstrat
 
@@ -1398,7 +1403,7 @@ cdef class Integrator(object):
  # set up sigf
  self._clear_sigf_h5()
  if not self.minimize_mem:
- self.sigf = numpy.ones(nsigf, numpy.float_)
+ self.sigf = numpy.ones(nsigf, FLOAT_TYPE)
  else:
  try:
  import h5py
@@ -1410,10 +1415,10 @@ cdef class Integrator(object):
  self.sum_sigf = nsigf
  self.neval_hcube = numpy.empty(self.min_neval_batch // 2 + 1, dtype=numpy.intp)
  self.neval_hcube[:] = avg_neval_hcube
- self.y = numpy.empty((self.min_neval_batch, self.dim), numpy.float_)
- self.x = numpy.empty((self.min_neval_batch, self.dim), numpy.float_)
- self.jac = numpy.empty(self.min_neval_batch, numpy.float_)
- self.fdv2 = numpy.empty(self.min_neval_batch, numpy.float_)
+ self.y = numpy.empty((self.min_neval_batch, self.dim), FLOAT_TYPE)
+ self.x = numpy.empty((self.min_neval_batch, self.dim), FLOAT_TYPE)
+ self.jac = numpy.empty(self.min_neval_batch, FLOAT_TYPE)
+ self.fdv2 = numpy.empty(self.min_neval_batch, FLOAT_TYPE)
  return old_val
 
  def settings(Integrator self not None, ngrid=0):
@@ -1569,9 +1574,9 @@ cdef class Integrator(object):
  Integrator self not None,
  bint yield_hcube=False,
  bint yield_y=False,
- fcn = None,
+ # fcn = None,
  ):
- """ Iterator over integration points and weights.
+ """ Low-level batch iterator over integration points and weights.
 
  This method creates an iterator that returns integration
  points from |vegas|, and their corresponding weights in an
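
The renamed docstring emphasizes that random_batch is the low-level batch interface. A usage sketch (the integrand and integration limits below are illustrative, not from the package):

    import numpy as np
    import vegas

    integ = vegas.Integrator([[0, 1], [0, 1]])

    # accumulate a weighted sum over batches of integration points;
    # x has shape (nbatch, dim) and wgt has shape (nbatch,)
    total = 0.0
    for x, wgt in integ.random_batch():
        fx = np.exp(-100. * np.sum((x - 0.5) ** 2, axis=1))   # illustrative integrand
        total += np.sum(wgt * fx)
    print(total)   # single-pass estimate of the integral
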
@@ -1669,10 +1674,10 @@ cdef class Integrator(object):
 
  # 1) resize work arrays if needed (to double what is needed)
  if neval_batch > self.y.shape[0]:
- self.y = numpy.empty((2 * neval_batch, self.dim), numpy.float_)
- self.x = numpy.empty((2 * neval_batch, self.dim), numpy.float_)
- self.jac = numpy.empty(2 * neval_batch, numpy.float_)
- self.fdv2 = numpy.empty(2 * neval_batch, numpy.float_)
+ self.y = numpy.empty((2 * neval_batch, self.dim), FLOAT_TYPE)
+ self.x = numpy.empty((2 * neval_batch, self.dim), FLOAT_TYPE)
+ self.jac = numpy.empty(2 * neval_batch, FLOAT_TYPE)
+ self.fdv2 = numpy.empty(2 * neval_batch, FLOAT_TYPE)
  y = self.y
  x = self.x
  jac = self.jac
@@ -1689,7 +1694,7 @@ cdef class Integrator(object):
  tmp_hcube = (tmp_hcube - y0[d]) // self.nstrat[d]
  for d in range(self.dim):
  for i in range(i_start, i_start + neval_hcube[ihcube]):
- y[i, d] = (y0[d] + yran[i, d]) / self.nstrat[d]
+ y[i, d] = (y0[d] + yran[i, d]) / self.nstrat[d]
  i_start += neval_hcube[ihcube]
  self.map.map(y, x, jac, neval_batch)
 
@@ -1721,7 +1726,7 @@ cdef class Integrator(object):
  def random(
  Integrator self not None, bint yield_hcube=False, bint yield_y=False
  ):
- """ Iterator over integration points and weights.
+ """ Low-level iterator over integration points and weights.
 
  This method creates an iterator that returns integration
  points from |vegas|, and their corresponding weights in an
@@ -1764,6 +1769,56 @@ cdef class Integrator(object):
  for i in range(x.shape[0]):
  yield (x[i], wgt[i])
 
+ def sample(self, nbatch=None, mode='rbatch'):
+     """ Generate random sample of integration weights and points.
+
+     Given a :class:`vegas.Integrator` called ``integ``, the code ::
+
+         wgt, x = integ.sample(mode='lbatch')
+
+     generates a random array of integration points ``x`` and the
+     array of corresponding weights ``wgt`` such that ::
+
+         r = sum(wgt * f(x))
+
+     is an estimate of the integral of ``lbatch`` integrand ``f(x)``.
+     Setting parameter ``mode='rbatch'`` formats ``x`` for use
+     in ``rbatch`` integrands.
+
+     Parameter ``nbatch`` specifies the minimum number of integration
+     points in the sample. The actual number is the smallest integer
+     multiple of ``integ.last_neval`` that is equal to or larger than
+     ``nbatch``.
+     """
+     neval = self.last_neval if self.last_neval > 0 else self.neval
+     nbatch = neval if nbatch is None else int(nbatch)
+     nit = nbatch // neval
+     if nit * neval < nbatch:
+         nit += 1
+     samples = []
+     wgts = []
+     for _ in range(nit):
+         for x, w in self.random_batch():
+             samples.append(numpy.array(x))
+             wgts.append(numpy.array(w))
+     samples = numpy.concatenate(samples, axis=0)
+     wgts = numpy.concatenate(wgts) / nit
+     # need to fix following to allow other formats for x
+     if self.xsample.shape is None:
+         if mode == 'rbatch':
+             samples = gvar.BufferDict(self.xsample, rbatch_buf=samples.T)
+         else:
+             samples = gvar.BufferDict(self.xsample, lbatch_buf=samples)
+     else:
+         if self.xsample.shape != ():
+             if mode == 'rbatch':
+                 samples = samples.T
+                 samples.shape = self.xsample.shape + (-1,)
+             else:
+                 samples.shape = (-1,) + self.xsample.shape
+     return wgts, samples
+
+
  @staticmethod
  def synchronize_random():
  try:
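
The new sample method can be exercised as described in its docstring. A short usage sketch, with an illustrative lbatch integrand and bounds (vegas.lbatchintegrand marks the function as an lbatch integrand):

    import numpy as np
    import vegas

    @vegas.lbatchintegrand
    def f(x):
        # lbatch integrand: x has shape (nbatch, dim)
        return np.exp(-100. * np.sum((x - 0.5) ** 2, axis=1))

    integ = vegas.Integrator([[0, 1], [0, 1]])
    integ(f, nitn=10, neval=2000)            # adapt the map to f first

    wgt, x = integ.sample(nbatch=10000, mode='lbatch')
    r = np.sum(wgt * f(x))                   # weighted sum estimates the integral
    print(r)
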
@@ -1788,14 +1843,14 @@ cdef class Integrator(object):
  def _make_std_integrand(self, fcn, xsample=None):
  """ Convert integrand ``fcn`` into an lbatch integrand.
 
- Returns an object of ``vi`` of type :class:`VegasIntegrand`.
+ Returns an object ``vi`` of type :class:`VegasIntegrand`.
  This object converts an arbitrary integrand ``fcn`` (``lbatch`, `rbatch`,
  and non-batch, with or without dictionaries for input or output)
  into a standard form: an lbatch integrand whose output is a
  2-d lbatch array.
 
  This is useful when building integrands that call other
- functions of the parameters. The latter are converted
+ functions of the parameters. The latter are converted to
  lbatch integrands irrespective of what they were
  originally. This standardizes them, making it straightforward
  to build them into a new integrand.
@@ -1931,8 +1986,8 @@ cdef class Integrator(object):
  cdef double[::1] sum_wf
  cdef double[::1] sum_dwf
  cdef double[:, ::1] sum_dwf2
- cdef double[::1] mean = numpy.empty(1, numpy.float_)
- cdef double[:, ::1] var = numpy.empty((1, 1), numpy.float_)
+ cdef double[::1] mean = numpy.empty(1, FLOAT_TYPE)
+ cdef double[:, ::1] var = numpy.empty((1, 1), FLOAT_TYPE)
  cdef numpy.npy_intp itn, i, j, jtmp, s, t, neval, fcn_size, len_hcube
  cdef bint adaptive_strat
  cdef double sum_sigf, sigf2
@@ -1963,12 +2018,12 @@ cdef class Integrator(object):
  fcn_size = fcn.size
 
  # allocate work arrays
- dwf = numpy.empty(fcn_size, numpy.float_)
- sum_wf = numpy.empty(fcn_size, numpy.float_)
- sum_dwf = numpy.empty(fcn_size, numpy.float_)
- sum_dwf2 = numpy.empty((fcn_size, fcn_size), numpy.float_)
- mean = numpy.empty(fcn_size, numpy.float_)
- var = numpy.empty((fcn_size, fcn_size), numpy.float_)
+ dwf = numpy.empty(fcn_size, FLOAT_TYPE)
+ sum_wf = numpy.empty(fcn_size, FLOAT_TYPE)
+ sum_dwf = numpy.empty(fcn_size, FLOAT_TYPE)
+ sum_dwf2 = numpy.empty((fcn_size, fcn_size), FLOAT_TYPE)
+ mean = numpy.empty(fcn_size, FLOAT_TYPE)
+ var = numpy.empty((fcn_size, fcn_size), FLOAT_TYPE)
  mean[:] = 0.0
  var[:, :] = 0.0
  result = VegasResult(fcn, weighted=self.adapt)
@@ -1984,7 +2039,7 @@ cdef class Integrator(object):
 
  # iterate batch-slices of integration points
  for x, y, wgt, hcube in self.random_batch(
- yield_hcube=True, yield_y=True, fcn=fcn
+ yield_hcube=True, yield_y=True, #fcn=fcn
  ):
  fdv2 = self.fdv2 # must be inside loop
  len_hcube = len(hcube)
@@ -2005,10 +2060,13 @@ cdef class Integrator(object):
  [(x[i*nx : (i+1)*nx], None) for i in range(self.nproc) if i*nx < x.shape[0]],
  1,
  )
- fx = numpy.concatenate(results, axis=0)
- else:
- fx = fcn.eval(x, jac=self.map.jac1d(y) if self.uses_jac else None)
-
+ fx = numpy.concatenate(results, axis=0, dtype=float)
+ else:
+ # fx = fcn.eval(x, jac=self.map.jac1d(y) if self.uses_jac else None)
+ fx = numpy.asarray(
+     fcn.eval(x, jac=self.map.jac1d(y) if self.uses_jac else None),
+     dtype=float
+ )
  # sanity check
  if numpy.any(numpy.isnan(fx)):
  raise ValueError('integrand evaluates to nan')
@@ -2958,14 +3016,14 @@ cdef class VegasIntegrand:
  nx = x.shape[0] // self.mpi_nproc + 1
  i0 = self.rank * nx
  i1 = min(i0 + nx, x.shape[0])
- f = numpy.empty((nx, self.size), numpy.float_)
+ f = numpy.empty((nx, self.size), FLOAT_TYPE)
  if i1 > i0:
  # fill f so long as haven't gone off end
  if jac is None:
  f[:(i1-i0)] = _eval(x[i0:i1], jac=None)
  else:
  f[:(i1-i0)] = _eval(x[i0:i1], jac=jac[i0:i1])
- results = numpy.empty((self.mpi_nproc * nx, self.size), numpy.float_)
+ results = numpy.empty((self.mpi_nproc * nx, self.size), FLOAT_TYPE)
  self.comm.Allgather(f, results)
  return results[:x.shape[0]]
  self.eval = _mpi_eval
@@ -3108,7 +3166,7 @@ cdef class _BatchIntegrand_from_NonBatch(_BatchIntegrand_from_Base):
  def __call__(self, numpy.ndarray[numpy.double_t, ndim=2] x, jac=None):
  cdef numpy.npy_intp i
  cdef numpy.ndarray[numpy.float_t, ndim=2] f = numpy.empty(
- (x.shape[0], self.size), numpy.float_
+ (x.shape[0], self.size), FLOAT_TYPE
  )
  if self.shape == ():
  # very common special case
vegas/_version.py CHANGED
@@ -1 +1 @@
- __version__ = '6.1.2'
+ __version__ = '6.2'
vegas-6.2.dist-info/METADATA CHANGED
@@ -1,15 +1,15 @@
  Metadata-Version: 2.1
  Name: vegas
- Version: 6.1.2
+ Version: 6.2
  Summary: Tools for adaptive multidimensional Monte Carlo integration.
  Home-page: https://github.com/gplepage/vegas
  Author: G. Peter Lepage
  Author-email: g.p.lepage@cornell.edu
  License: GPLv3
- Requires-Python: >=2.7
+ Requires-Python: >=3.9
  License-File: LICENSE.txt
- Requires-Dist: numpy >=1.16
- Requires-Dist: gvar >=13.0.1
+ Requires-Dist: numpy>=1.24
+ Requires-Dist: gvar>=13.0.1
 
  vegas
  -----
vegas-6.2.dist-info/RECORD ADDED
@@ -0,0 +1,12 @@
+ vegas-6.2.dist-info/RECORD,,
+ vegas-6.2.dist-info/WHEEL,sha256=7Wd-yga4fjSiXpUH443rsPZpiZ4h8-uNrXJrYRW_e14,109
+ vegas-6.2.dist-info/top_level.txt,sha256=rnAmsIvsHyplln9ev-uw4hM7slW7VUdBQu9VgX8knkE,6
+ vegas-6.2.dist-info/LICENSE.txt,sha256=YQSKRpj-PNC7SScHem3AECgwVONM-whKrs74SDueZxM,31996
+ vegas-6.2.dist-info/METADATA,sha256=3YW5kUFPh3I6KynW55Y5htgIy8BYgRMGyaIslTzmj0Q,1837
+ vegas/_version.py,sha256=NFnZu6AHpSuEkoJMZnfac0tWxuaxUxoN8Zg1vDKb_RE,20
+ vegas/__init__.pxd,sha256=MzfsI-0xD0rFnyHl1UFXz-2wSvGjfGA9QFKuCTr5nhs,646
+ vegas/__init__.py,sha256=TWzA9zsS_xiNa_K3nC1NZZ4MlU7Vwaz0YtGJatzn3Jw,55840
+ vegas/_vegas.pyx,sha256=XStCrwRe6kq8sCChLeaK2mQia89K8Jqhpp6s_84KhzE,143436
+ vegas/_vegas.cpython-312-darwin.so,sha256=iXuWqWk7M6yM5jc4k00T25dl1EUCh6gcwYD24irvtAU,1308080
+ vegas/_vegas.pxd,sha256=Qe0-Zuep8t9OXUq8yLLZ1T_e-7zABM4aN2inX6N4BX4,3035
+ vegas/_vegas.c,sha256=U9YeShWlxm32BRD5VC-X6WhMVXzNtsTiDesK9UW5MrA,5543127
vegas-6.2.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.43.0)
+ Generator: setuptools (75.6.0)
  Root-Is-Purelib: false
  Tag: cp312-cp312-macosx_11_0_arm64
 
vegas-6.1.2.dist-info/RECORD REMOVED
@@ -1,12 +0,0 @@
- vegas-6.1.2.dist-info/RECORD,,
- vegas-6.1.2.dist-info/WHEEL,sha256=Vo9YTsjXxZ5SWdH4n69oS5jU3YTIi3eHk0n-aUcTtlw,110
- vegas-6.1.2.dist-info/top_level.txt,sha256=rnAmsIvsHyplln9ev-uw4hM7slW7VUdBQu9VgX8knkE,6
- vegas-6.1.2.dist-info/LICENSE.txt,sha256=YQSKRpj-PNC7SScHem3AECgwVONM-whKrs74SDueZxM,31996
- vegas-6.1.2.dist-info/METADATA,sha256=kL6jNelQ5SZFzJY3F8WlI6MsbskzBWxD7muxn6WQxUM,1841
- vegas/_version.py,sha256=Uej179rJLsiLVamUZe0A6Q6mYkLR6ugvUdzQjDyWjdU,22
- vegas/__init__.pxd,sha256=MzfsI-0xD0rFnyHl1UFXz-2wSvGjfGA9QFKuCTr5nhs,646
- vegas/__init__.py,sha256=TWzA9zsS_xiNa_K3nC1NZZ4MlU7Vwaz0YtGJatzn3Jw,55840
- vegas/_vegas.pyx,sha256=jc9SbG7IUP4c16Sn65zb-avP3Hdjz3S7LMunbVVEpe8,141213
- vegas/_vegas.cpython-312-darwin.so,sha256=ZaZuWQPvD6UdQDcB7jFRxVcLuKffvOMk16cvDWH95DQ,1307392
- vegas/_vegas.pxd,sha256=Qe0-Zuep8t9OXUq8yLLZ1T_e-7zABM4aN2inX6N4BX4,3035
- vegas/_vegas.c,sha256=4kBsVVlIc6FuZlJ-lFZHEkyyR3opmQsOSlo4aQpqvUQ,5478669