pyerrors 2.10.0__tar.gz → 2.11.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyerrors-2.10.0 → pyerrors-2.11.1}/PKG-INFO +2 -2
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/__init__.py +1 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/openQCD.py +3 -1
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/sfcf.py +18 -3
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/misc.py +1 -1
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/obs.py +31 -6
- pyerrors-2.11.1/pyerrors/special.py +23 -0
- pyerrors-2.11.1/pyerrors/version.py +1 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors.egg-info/PKG-INFO +2 -2
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors.egg-info/SOURCES.txt +1 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors.egg-info/requires.txt +1 -1
- {pyerrors-2.10.0 → pyerrors-2.11.1}/setup.py +1 -1
- pyerrors-2.10.0/pyerrors/version.py +0 -1
- {pyerrors-2.10.0 → pyerrors-2.11.1}/LICENSE +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/README.md +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/correlators.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/covobs.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/dirac.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/fits.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/__init__.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/bdio.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/dobs.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/hadrons.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/json.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/misc.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/pandas.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/input/utils.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/integrate.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/linalg.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/mpm.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors/roots.py +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors.egg-info/dependency_links.txt +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyerrors.egg-info/top_level.txt +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/pyproject.toml +0 -0
- {pyerrors-2.10.0 → pyerrors-2.11.1}/setup.cfg +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: pyerrors
|
|
3
|
-
Version: 2.10.0
|
|
3
|
+
Version: 2.11.1
|
|
4
4
|
Summary: Error propagation and statistical analysis for Monte Carlo simulations
|
|
5
5
|
Home-page: https://github.com/fjosw/pyerrors
|
|
6
6
|
Author: Fabian Joswig
|
|
@@ -22,7 +22,7 @@ Classifier: Topic :: Scientific/Engineering :: Physics
|
|
|
22
22
|
Requires-Python: >=3.8.0
|
|
23
23
|
Description-Content-Type: text/markdown
|
|
24
24
|
License-File: LICENSE
|
|
25
|
-
Requires-Dist: numpy
|
|
25
|
+
Requires-Dist: numpy<2,>=1.24
|
|
26
26
|
Requires-Dist: autograd>=1.6.2
|
|
27
27
|
Requires-Dist: numdifftools>=0.9.41
|
|
28
28
|
Requires-Dist: matplotlib>=3.7
|
|
@@ -1286,7 +1286,9 @@ def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs):
|
|
|
1286
1286
|
imagsamples[repnum][t].append(corrres[1][t])
|
|
1287
1287
|
if 'idl' in kwargs:
|
|
1288
1288
|
left_idl = list(left_idl)
|
|
1289
|
-
if len(left_idl) > 0:
|
|
1289
|
+
if expected_idl[repnum] == left_idl:
|
|
1290
|
+
raise ValueError("None of the idls searched for were found in replikum of file " + file)
|
|
1291
|
+
elif len(left_idl) > 0:
|
|
1290
1292
|
warnings.warn('Could not find idls ' + str(left_idl) + ' in replikum of file ' + file, UserWarning)
|
|
1291
1293
|
repnum += 1
|
|
1292
1294
|
s = "Read correlator " + corr + " from " + str(repnum) + " replika with idls" + str(realsamples[0][t])
|
|
@@ -121,7 +121,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
|
|
|
121
121
|
String that separates the ensemble identifier from the configuration number (default 'n').
|
|
122
122
|
replica: list
|
|
123
123
|
list of replica to be read, default is all
|
|
124
|
-
files: list
|
|
124
|
+
files: list[list[int]]
|
|
125
125
|
list of files to be read per replica, default is all.
|
|
126
126
|
for non-compact output format, hand the folders to be read here.
|
|
127
127
|
check_configs: list[list[int]]
|
|
@@ -184,6 +184,8 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
|
|
|
184
184
|
|
|
185
185
|
else:
|
|
186
186
|
replica = len([file.split(".")[-1] for file in ls]) // len(set([file.split(".")[-1] for file in ls]))
|
|
187
|
+
if replica == 0:
|
|
188
|
+
raise Exception('No replica found in directory')
|
|
187
189
|
if not silent:
|
|
188
190
|
print('Read', part, 'part of', name_list, 'from', prefix[:-1], ',', replica, 'replica')
|
|
189
191
|
|
|
@@ -236,6 +238,16 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
|
|
|
236
238
|
rep_path = path + '/' + item
|
|
237
239
|
if "files" in kwargs:
|
|
238
240
|
files = kwargs.get("files")
|
|
241
|
+
if isinstance(files, list):
|
|
242
|
+
if all(isinstance(f, list) for f in files):
|
|
243
|
+
files = files[i]
|
|
244
|
+
elif all(isinstance(f, str) for f in files):
|
|
245
|
+
files = files
|
|
246
|
+
else:
|
|
247
|
+
raise TypeError("files has to be of type list[list[str]] or list[str]!")
|
|
248
|
+
else:
|
|
249
|
+
raise TypeError("files has to be of type list[list[str]] or list[str]!")
|
|
250
|
+
|
|
239
251
|
else:
|
|
240
252
|
files = []
|
|
241
253
|
sub_ls = _find_files(rep_path, prefix, compact, files)
|
|
@@ -248,7 +260,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
|
|
|
248
260
|
else:
|
|
249
261
|
rep_idl.append(int(cfg[3:]))
|
|
250
262
|
except Exception:
|
|
251
|
-
raise Exception("Couldn't parse idl from directroy, problem with file " + cfg)
|
|
263
|
+
raise Exception("Couldn't parse idl from directory, problem with file " + cfg)
|
|
252
264
|
rep_idl.sort()
|
|
253
265
|
# maybe there is a better way to print the idls
|
|
254
266
|
if not silent:
|
|
@@ -309,7 +321,10 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
|
|
|
309
321
|
w = specs[3]
|
|
310
322
|
w2 = specs[4]
|
|
311
323
|
if "files" in kwargs:
|
|
312
|
-
|
|
324
|
+
if isinstance(kwargs.get("files"), list) and all(isinstance(f, str) for f in kwargs.get("files")):
|
|
325
|
+
name_ls = kwargs.get("files")
|
|
326
|
+
else:
|
|
327
|
+
raise TypeError("In append mode, files has to be of type list[str]!")
|
|
313
328
|
else:
|
|
314
329
|
name_ls = ls
|
|
315
330
|
for exc in name_ls:
|
|
@@ -1138,7 +1138,7 @@ def _intersection_idx(idl):
|
|
|
1138
1138
|
return idinter
|
|
1139
1139
|
|
|
1140
1140
|
|
|
1141
|
-
def _expand_deltas_for_merge(deltas, idx, shape, new_idx):
|
|
1141
|
+
def _expand_deltas_for_merge(deltas, idx, shape, new_idx, scalefactor):
|
|
1142
1142
|
"""Expand deltas defined on idx to the list of configs that is defined by new_idx.
|
|
1143
1143
|
New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
|
|
1144
1144
|
common divisor of the step sizes is used as new step size.
|
|
@@ -1154,15 +1154,20 @@ def _expand_deltas_for_merge(deltas, idx, shape, new_idx):
|
|
|
1154
1154
|
Number of configs in idx.
|
|
1155
1155
|
new_idx : list
|
|
1156
1156
|
List of configs that defines the new range, has to be sorted in ascending order.
|
|
1157
|
+
scalefactor : float
|
|
1158
|
+
An additional scaling factor that can be applied to scale the fluctuations,
|
|
1159
|
+
e.g., when Obs with differing numbers of replica are merged.
|
|
1157
1160
|
"""
|
|
1158
|
-
|
|
1159
1161
|
if type(idx) is range and type(new_idx) is range:
|
|
1160
1162
|
if idx == new_idx:
|
|
1161
|
-
|
|
1163
|
+
if scalefactor == 1:
|
|
1164
|
+
return deltas
|
|
1165
|
+
else:
|
|
1166
|
+
return deltas * scalefactor
|
|
1162
1167
|
ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
|
|
1163
1168
|
for i in range(shape):
|
|
1164
1169
|
ret[idx[i] - new_idx[0]] = deltas[i]
|
|
1165
|
-
return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx)
|
|
1170
|
+
return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) * scalefactor
|
|
1166
1171
|
|
|
1167
1172
|
|
|
1168
1173
|
def derived_observable(func, data, array_mode=False, **kwargs):
|
|
@@ -1243,6 +1248,25 @@ def derived_observable(func, data, array_mode=False, **kwargs):
|
|
|
1243
1248
|
new_r_values[name] = func(tmp_values, **kwargs)
|
|
1244
1249
|
new_idl_d[name] = _merge_idx(idl)
|
|
1245
1250
|
|
|
1251
|
+
def _compute_scalefactor_missing_rep(obs):
|
|
1252
|
+
"""
|
|
1253
|
+
Computes the scale factor that is to be multiplied with the deltas
|
|
1254
|
+
in the case where Obs with different subsets of replica are merged.
|
|
1255
|
+
Returns a dictionary with the scale factor for each Monte Carlo name.
|
|
1256
|
+
|
|
1257
|
+
Parameters
|
|
1258
|
+
----------
|
|
1259
|
+
obs : Obs
|
|
1260
|
+
The observable corresponding to the deltas that are to be scaled
|
|
1261
|
+
"""
|
|
1262
|
+
scalef_d = {}
|
|
1263
|
+
for mc_name in obs.mc_names:
|
|
1264
|
+
mc_idl_d = [name for name in obs.idl if name.startswith(mc_name + '|')]
|
|
1265
|
+
new_mc_idl_d = [name for name in new_idl_d if name.startswith(mc_name + '|')]
|
|
1266
|
+
if len(mc_idl_d) > 0 and len(mc_idl_d) < len(new_mc_idl_d):
|
|
1267
|
+
scalef_d[mc_name] = sum([len(new_idl_d[name]) for name in new_mc_idl_d]) / sum([len(new_idl_d[name]) for name in mc_idl_d])
|
|
1268
|
+
return scalef_d
|
|
1269
|
+
|
|
1246
1270
|
if 'man_grad' in kwargs:
|
|
1247
1271
|
deriv = np.asarray(kwargs.get('man_grad'))
|
|
1248
1272
|
if new_values.shape + data.shape != deriv.shape:
|
|
@@ -1280,7 +1304,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
|
|
|
1280
1304
|
d_extracted[name] = []
|
|
1281
1305
|
ens_length = len(new_idl_d[name])
|
|
1282
1306
|
for i_dat, dat in enumerate(data):
|
|
1283
|
-
d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
|
|
1307
|
+
d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name], _compute_scalefactor_missing_rep(o).get(name.split('|')[0], 1)) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
|
|
1284
1308
|
for name in new_cov_names:
|
|
1285
1309
|
g_extracted[name] = []
|
|
1286
1310
|
zero_grad = _Zero_grad(new_covobs_lengths[name])
|
|
@@ -1302,11 +1326,12 @@ def derived_observable(func, data, array_mode=False, **kwargs):
|
|
|
1302
1326
|
new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
|
|
1303
1327
|
else:
|
|
1304
1328
|
for j_obs, obs in np.ndenumerate(data):
|
|
1329
|
+
scalef_d = _compute_scalefactor_missing_rep(obs)
|
|
1305
1330
|
for name in obs.names:
|
|
1306
1331
|
if name in obs.cov_names:
|
|
1307
1332
|
new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
|
|
1308
1333
|
else:
|
|
1309
|
-
new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
|
|
1334
|
+
new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name], scalef_d.get(name.split('|')[0], 1))
|
|
1310
1335
|
|
|
1311
1336
|
new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
|
|
1312
1337
|
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import scipy
|
|
2
|
+
import numpy as np
|
|
3
|
+
from autograd.extend import primitive, defvjp
|
|
4
|
+
from autograd.scipy.special import j0, y0, j1, y1, jn, yn, i0, i1, iv, ive, beta, betainc, betaln
|
|
5
|
+
from autograd.scipy.special import polygamma, psi, digamma, gamma, gammaln, gammainc, gammaincc, gammasgn, rgamma, multigammaln
|
|
6
|
+
from autograd.scipy.special import erf, erfc, erfinv, erfcinv, logit, expit, logsumexp
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
__all__ = ["beta", "betainc", "betaln",
|
|
10
|
+
"polygamma", "psi", "digamma", "gamma", "gammaln", "gammainc", "gammaincc", "gammasgn", "rgamma", "multigammaln",
|
|
11
|
+
"kn", "j0", "y0", "j1", "y1", "jn", "yn", "i0", "i1", "iv", "ive",
|
|
12
|
+
"erf", "erfc", "erfinv", "erfcinv", "logit", "expit", "logsumexp"]
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@primitive
|
|
16
|
+
def kn(n, x):
|
|
17
|
+
"""Modified Bessel function of the second kind of integer order n"""
|
|
18
|
+
if int(n) != n:
|
|
19
|
+
raise TypeError("The order 'n' needs to be an integer.")
|
|
20
|
+
return scipy.special.kn(n, x)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
defvjp(kn, None, lambda ans, n, x: lambda g: - g * 0.5 * (kn(np.abs(n - 1), x) + kn(n + 1, x)))
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "2.11.1"
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: pyerrors
|
|
3
|
-
Version: 2.10.0
|
|
3
|
+
Version: 2.11.1
|
|
4
4
|
Summary: Error propagation and statistical analysis for Monte Carlo simulations
|
|
5
5
|
Home-page: https://github.com/fjosw/pyerrors
|
|
6
6
|
Author: Fabian Joswig
|
|
@@ -22,7 +22,7 @@ Classifier: Topic :: Scientific/Engineering :: Physics
|
|
|
22
22
|
Requires-Python: >=3.8.0
|
|
23
23
|
Description-Content-Type: text/markdown
|
|
24
24
|
License-File: LICENSE
|
|
25
|
-
Requires-Dist: numpy
|
|
25
|
+
Requires-Dist: numpy<2,>=1.24
|
|
26
26
|
Requires-Dist: autograd>=1.6.2
|
|
27
27
|
Requires-Dist: numdifftools>=0.9.41
|
|
28
28
|
Requires-Dist: matplotlib>=3.7
|
|
@@ -25,7 +25,7 @@ setup(name='pyerrors',
|
|
|
25
25
|
license="MIT",
|
|
26
26
|
packages=find_packages(),
|
|
27
27
|
python_requires='>=3.8.0',
|
|
28
|
-
install_requires=['numpy>=1.24', 'autograd>=1.6.2', 'numdifftools>=0.9.41', 'matplotlib>=3.7', 'scipy>=1.10', 'iminuit>=2.21', 'h5py>=3.8', 'lxml>=4.9', 'python-rapidjson>=1.10', 'pandas>=2.0'],
|
|
28
|
+
install_requires=['numpy>=1.24,<2', 'autograd>=1.6.2', 'numdifftools>=0.9.41', 'matplotlib>=3.7', 'scipy>=1.10', 'iminuit>=2.21', 'h5py>=3.8', 'lxml>=4.9', 'python-rapidjson>=1.10', 'pandas>=2.0'],
|
|
29
29
|
extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis', 'nbmake', 'flake8']},
|
|
30
30
|
classifiers=[
|
|
31
31
|
'Development Status :: 5 - Production/Stable',
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
__version__ = "2.10.0"
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|