mini-pole 0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mini_pole/__init__.py +5 -0
- mini_pole/con_map.py +167 -0
- mini_pole/esprit.py +160 -0
- mini_pole/green_func.py +101 -0
- mini_pole/mini_pole.py +420 -0
- mini_pole/mini_pole_dlr.py +62 -0
- mini_pole/spectrum_example.py +132 -0
- mini_pole-0.2.dist-info/LICENSE +21 -0
- mini_pole-0.2.dist-info/METADATA +125 -0
- mini_pole-0.2.dist-info/RECORD +12 -0
- mini_pole-0.2.dist-info/WHEEL +5 -0
- mini_pole-0.2.dist-info/top_level.txt +1 -0
mini_pole/__init__.py
ADDED
mini_pole/con_map.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
class ConMapGeneric:
    '''
    Generic holomorphic mapping which works for any cases.

    The map is z(w) = 0.5 * dw_h * (w - 1/w) + 1j * w_m, so w = 0 is sent
    to the point at infinity.
    '''
    def __init__(self, w_m, dw_h, branch_in = True):
        '''
        Initialize the class with w_m and dw_h, which correspond to $\omega_{\rm m}$ and $\Delta \omega_{\rm h}$ in the paper, respectively.
        Points in the z plane are mapped to the inside (outside) of the unit disk in the w plane when branch_in is True (False).
        '''
        assert dw_h.real > 0.0 and dw_h.imag == 0.0
        self.w_m = w_m
        self.dw_h = dw_h
        self.branch_in = branch_in
        #w values whose image is the point at infinity
        self.w_inf = [0.0]

    def cal_z(self, w):
        '''
        Intermediate function of z(w) which only works for a single point.
        '''
        if w in self.w_inf:
            return np.inf
        return 0.5 * self.dw_h * (w - 1.0 / w) + 1j * self.w_m

    def cal_w(self, z):
        '''
        Intermediate function of w(z) which only works for a single point.
        '''
        x = (z - 1j * self.w_m) / self.dw_h
        root = x - np.sqrt(x ** 2.0 + 1.0)
        #the inverse map has two branches, root and 2*x - root; keep the
        #one lying on the side of the unit circle requested at construction
        mag = np.absolute(root)
        if (self.branch_in and mag > 1.0) or (not self.branch_in and mag < 1.0):
            root = 2.0 * x - root
        return root

    def cal_dz(self, w):
        '''
        Intermediate function of dz(w) which only works for single point.
        '''
        if w in self.w_inf:
            return np.inf
        return 0.5 * self.dw_h * (1.0 + 1.0 / w ** 2.0)

    def z(self, w):
        '''
        Calculate z from w.
        '''
        return np.vectorize(self.cal_z)(w)

    def w(self, z):
        '''
        Calculate w from z.
        '''
        return np.vectorize(self.cal_w)(z)

    def dz(self, w):
        '''
        Calculate dz/dw at value w.
        '''
        return np.vectorize(self.cal_dz)(w)
|
|
67
|
+
|
|
68
|
+
class ConMapGapless:
    '''
    conformal mapping which works for both gapless and gapped cases

    The map is z(w) = 2 * w_min * w / (1 - w^2); w = +/-1 are sent to the
    point at infinity and only |w| <= 1 (up to rounding) is accepted.
    '''
    def __init__(self, w_min):
        assert w_min > 0.0
        self.w_min = w_min
        #w values whose image is the point at infinity
        self.w_inf = [-1.0, 1.0]

    def cal_z(self, w):
        assert np.abs(w) < 1.0 + 1.e-15
        if w in self.w_inf:
            return np.inf
        return 2.0 * self.w_min * w / (1.0 - w * w)

    def cal_w(self, z):
        if z == 0.0:
            return 0.0
        val = self.w_min * (np.sqrt(1.0 / (z * z) + 1.0 / (self.w_min * self.w_min)) - 1.0 / z)
        #the other root of the quadratic is -val - 2*w_min/z; switch to it
        #whenever the first root falls outside the unit disk
        if np.absolute(val) > 1.0:
            val = -val - 2.0 * self.w_min / z
        return val

    def cal_dz(self, w):
        assert np.abs(w) < 1.0 + 1.e-15
        if w in self.w_inf:
            return np.inf
        return 2.0 * self.w_min * (1.0 + w ** 2) / (1.0 - w ** 2) ** 2

    def z(self, w):
        return np.vectorize(self.cal_z)(w)

    def w(self, z):
        return np.vectorize(self.cal_w)(z)

    def dz(self, w):
        return np.vectorize(self.cal_dz)(w)
|
|
108
|
+
|
|
109
|
+
class ConMapRet:
    '''
    Holomorphic mapping for the retarded Green's function.

    The map is z(w) = 0.5 * dw_h * (w + 1/w) + w_m; w = 0 is sent to the
    point at infinity.
    '''
    def __init__(self, w_m, dw_h):
        '''
        Initialize the class with w_m and dw_h.
        Points in the z plane are mapped to the inside of the unit disk in the w plane.
        '''
        assert dw_h.real > 0.0 and dw_h.imag == 0.0
        self.w_m = w_m
        self.dw_h = dw_h
        #w values whose image is the point at infinity
        self.w_inf = [0.0]

    def cal_z(self, w):
        '''
        Intermediate function of z(w) which only works for a single point.
        '''
        if w in self.w_inf:
            return np.inf
        return 0.5 * self.dw_h * (w + 1.0 / w) + self.w_m

    def cal_w(self, z):
        '''
        Intermediate function of w(z) which only works for a single point.
        '''
        x = (z - self.w_m) / self.dw_h
        #the "+ 0j" forces a complex square root for arguments below 1
        root = x + np.sqrt(x ** 2.0 - 1.0 + 0j)
        #the other branch is 2*x - root; keep the one inside the unit disk
        if np.absolute(root) > 1.0:
            root = 2.0 * x - root
        return root

    def cal_dz(self, w):
        '''
        Intermediate function of dz(w) which only works for single point.
        '''
        if w in self.w_inf:
            return np.inf
        return 0.5 * self.dw_h * (1.0 - 1.0 / w ** 2.0)

    def z(self, w):
        '''
        Calculate z from w.
        '''
        return np.vectorize(self.cal_z)(w)

    def w(self, z):
        '''
        Calculate w from z.
        '''
        return np.vectorize(self.cal_w)(z)

    def dz(self, w):
        '''
        Calculate dz/dw at value w.
        '''
        return np.vectorize(self.cal_dz)(w)
|
mini_pole/esprit.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import warnings
|
|
3
|
+
|
|
4
|
+
class ESPRIT:
    '''
    Matrix version of the ESPRIT method for approximating functions with complex exponentials.
    '''
    def __init__(self, h_k, x_min = 0, x_max = 1, err = None, err_type = "abs", M = None, Lfactor = 0.4, tol = 1.e-15, ctrl_ratio = 10):
        '''
        Initialize with function values sampled on a uniform grid from x_min to x_max.

        Parameters
        ----------
        h_k : ndarray
            (N,) or (N, dim) array of function samples on the uniform grid.
        x_min, x_max : float
            End points of the sampling interval; x_min < x_max is required.
        err : float, optional
            Accuracy used to select the rank M from the singular values.
        err_type : str, default "abs"
            "abs" for absolute or "rel" for relative error.
        M : int, optional
            Number of exponentials; if given, automatic rank selection is skipped.
        Lfactor : float, default 0.4
            Window ratio for the Hankel matrix; 1/3 <= Lfactor <= 1/2 is suggested.
        tol : float, default 1.e-15
            Threshold used to classify the data as purely real or purely imaginary.
        ctrl_ratio : float, default 10
            Acceptance ratio of the achieved error with respect to sigma.

        Raises
        ------
        Exception
            If no controlled approximation can be found while decreasing M.
        '''
        h_k = h_k.reshape(h_k.shape[0], -1)
        self.N = h_k.shape[0]
        self.dim = h_k.shape[1]
        if Lfactor < 1.0 / 3.0 or Lfactor > 0.5:
            warnings.warn("It is suggested to set 1 / 3 <= Lfactor <= 1 / 2.")
        self.L = int(Lfactor * (self.N - 1))
        assert (self.N - self.L) >= (self.L + 1)
        assert x_min < x_max
        assert err_type in ["abs", "rel"]

        #classify the data so the final approximation can be projected back
        #onto the detected type (see _project)
        if np.max(np.abs(h_k.imag)) < tol:
            self.type = "real"
            self.h_k = np.array(h_k.real)
        elif np.max(np.abs(h_k.real)) < tol:
            self.type = "imag"
            self.h_k = np.array(1j * h_k.imag)
        else:
            self.type = "cplx"
            self.h_k = np.array(h_k)
        self.x_min = x_min
        self.x_max = x_max
        self.x_k = np.linspace(self.x_min, self.x_max, self.N)
        self.err = err
        self.err_type = err_type
        self.M = M
        self.tol = tol

        #note to set data type to be complex even if the input is real! Otherwise the result might be unstable!
        #(the builtin `complex` maps to complex128; the old alias np.complex_ was removed in NumPy 2.0)
        self.H = np.zeros((self.dim * (self.N - self.L), self.L + 1), dtype=complex)
        for l in range(self.N - self.L):
            self.H[(self.dim * l):(self.dim * (l + 1)), :] = self.h_k[l:(l + self.L + 1)].T

        #there is a very low chance that SVD does not converge; retry with one
        #fewer column.  Catch only LinAlgError so that KeyboardInterrupt and
        #genuine bugs are not silently swallowed in an endless loop.
        while True:
            try:
                _, self.S, self.W = np.linalg.svd(self.H, full_matrices=False)
                break
            except np.linalg.LinAlgError:
                self.H = self.H[:, :-1]

        if self.M is None:
            if self.err is not None:
                self.find_M_with_err()
            else:
                self.find_M_with_exp_decay()
        else:
            self.M = min(self.M, self.S.size - 1)
        #decrease M until the achieved error is controlled by sigma
        while True:
            self.sigma = self.S[self.M]
            self.W_0 = self.W[:self.M, :-1]
            self.W_1 = self.W[:self.M, 1:]
            self.F_M = np.linalg.pinv(self.W_0.T) @ self.W_1.T

            self.gamma = np.linalg.eigvals(self.F_M)
            self.find_omega()
            self.cal_err()

            if self.err_max < max(ctrl_ratio * self.sigma, 1.e-14 * self.S[0]):
                break
            else:
                self.M -= 1
                if self.M == 0:
                    raise Exception("Could not find controlled approximation!")

    def find_M_with_err(self):
        '''
        Find the rank M for the given error tolerance.
        '''
        cutoff = self.err if self.err_type == "abs" else self.S[0] * self.err
        for idx in range(self.S.size):
            if self.S[idx] < cutoff:
                break
        if self.S[idx] >= cutoff:
            print("err is set to be too small!")
        self.M = idx

    def find_M_with_exp_decay(self):
        '''
        Find the maximum index for the exponentially decaying region.
        '''
        n_max = min(3 * int(np.log(1.e12)), int(0.8 * self.S.size))
        #fit log(S) on the second half of the window with a straight line ...
        idx_fit = np.arange(int(0.5 * n_max), n_max)
        val_fit = self.S[idx_fit]

        A = np.vstack((idx_fit, np.ones_like(idx_fit))).T
        a, b = np.linalg.pinv(A) @ np.log(val_fit)
        self.S_approx = np.exp(a * np.arange(n_max) + b)
        #... and keep every singular value clearly above the fitted decay
        self.M = sum(self.S[:n_max] > 3.0 * self.S_approx) + 1

    def find_omega(self):
        '''
        Find weights of corresponding nodes gamma.
        '''
        V = np.zeros((self.h_k.shape[0], self.M), dtype=complex)
        for i in range(V.shape[0]):
            V[i, :] = self.gamma ** i
        #using least-squares solution is more stable than using pseudo-inverse
        #setting rcond=None (default) sometimes leads to incorrect result for high-precision input
        self.omega, residuals, rank, s = np.linalg.lstsq(V, self.h_k, rcond=-1)
        self.lstsq_quality = (residuals, rank, s)

    def cal_err(self):
        #maximum / average deviation of the reconstruction on the sample grid
        h_k_approx = self.get_value(self.x_k)
        self.err_max = np.abs(h_k_approx - self.h_k).max(axis=0).max()
        self.err_ave = np.abs(h_k_approx - self.h_k).mean(axis=0).max()

    def _build_vandermonde(self, x0):
        '''
        Build gamma ** ((N - 1) * x0) for a scalar x0 (1-D result) or an
        array x0 (one row per point).
        '''
        if np.isscalar(x0):
            return self.gamma ** ((self.h_k.shape[0] - 1) * x0)
        V = np.zeros((x0.size, self.gamma.size), dtype=complex)
        for i in range(V.shape[0]):
            V[i, :] = self.gamma ** ((self.h_k.shape[0] - 1) * x0[i])
        return V

    def _project(self, value):
        '''
        Project the complex approximation back onto the detected data type.
        '''
        if self.type == "real":
            return value.real
        if self.type == "imag":
            return 1j * value.imag
        return value

    def get_value_indiv(self, x, col):
        '''
        Get the approximated function value at point x for column col.
        '''
        assert col >= 0 and col < self.dim
        x0 = (x - self.x_min) / (self.x_max - self.x_min)
        if np.any(x0 < -1.e-12) or np.any(x0 > 1.0 + 1.e-12):
            raise Exception("This approximation only has error control for x in [x_min, x_max]!")

        V = self._build_vandermonde(x0)
        return self._project(np.dot(V, self.omega[:, col]))

    def get_value(self, x):
        '''
        Get the approximated function value at point x.
        '''
        x0 = (x - self.x_min) / (self.x_max - self.x_min)
        if np.any(x0 < -1.e-12) or np.any(x0 > 1.0 + 1.e-12):
            raise Exception("This approximation only has error control for x in [x_min, x_max]!")

        V = self._build_vandermonde(x0)
        return self._project(np.dot(V, self.omega))
|
mini_pole/green_func.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import scipy.integrate as integrate
|
|
3
|
+
|
|
4
|
+
class GreenFunc:
    '''
    Green's function class which works for both discrete and continuous cases.
    '''
    def __init__(self, statistics, beta, flag = "discrete", A_i = None, x_i = None, A_x = None, x_min = -10, x_max = 10, tol = 0.01):
        '''
        Meaning of each parameter:
        1. statistics stands for the statistics of the system, which is either "F" (for fermions) or "B" (for bosons);
        2. beta is the inverse temperature in Hartree atomic units;
        3. flag is "discrete" for discrete cases (with delta peaks) or "continuous" for continuous cases (with broadened peaks);
        4. For discrete cases, an array of pole weights A_i and pole locations x_i should be provided,
           while for continuous cases, the function form A_x should be provided, along with non-zero range.
           If A_x is non-zero everywhere, x_min should be -np.inf and x_max should be np.inf.
        5. tol is the value to distinguish delta peaks and broadened peaks for discrete cases, according to the imaginary part of pole locations.
        '''
        assert statistics.upper() in ["F", "B"]
        assert flag.lower() in ["discrete", "continuous"]

        self.statistics = statistics.upper()
        self.beta = beta
        self.flag = flag.lower()
        if self.flag == "discrete":
            assert A_i is not None and x_i is not None
            assert A_i.size == x_i.size
            #pole locations and weights can both be complex-valued
            self.A_i = A_i
            #drop a numerically-zero imaginary part of the locations
            self.x_i = x_i.real if np.max(np.abs(x_i.imag)) < 1.e-15 else x_i
            self.tol = tol
        else:
            #A_x(x) is a continuous function of x
            assert A_x is not None
            self.A_x = A_x
            self.x_min = x_min
            self.x_max = x_max

    def get_matsubara(self, N_up = 1000, lower = False):
        '''
        Calculate the Matsubara data.
        N_up is the number of non-negative frequencies.
        lower controls whether data on negative frequencies is calculated or not.
        '''
        if self.statistics == "F":
            n_sample = np.arange(N_up) if lower is False else np.arange(-N_up, N_up)
            self.w = (2 * n_sample + 1) * np.pi / self.beta
        else:
            n_sample = np.arange(N_up) if lower is False else np.arange(-(N_up - 1), N_up)
            self.w = 2 * n_sample * np.pi / self.beta

        self.N_tot = self.w.size
        self.w_min = self.w[0]
        self.w_max = self.w[-1]
        self.G_w = self.get_G(1j * self.w)

    def cal_G(self, z, err=1.e-13):
        '''
        Calculate G(z) at point z. err is the accuracy of the integration we want to achieve.
        If err is chosen to be too small, there will be some warnings.
        '''
        if self.flag == "discrete":
            return sum((self.A_i[i] / (z - self.x_i[i]) for i in range(self.x_i.size)), 0.0)
        #integrate real and imaginary parts separately
        re_part = integrate.quad(lambda x: self.A_x(x) * (1.0 / (z - x)).real, self.x_min, self.x_max, epsabs=err, epsrel=err, limit=10000)[0]
        im_part = integrate.quad(lambda x: self.A_x(x) * (1.0 / (z - x)).imag, self.x_min, self.x_max, epsabs=err, epsrel=err, limit=10000)[0]
        return re_part + 1j * im_part

    def get_G(self, z, err=1.e-13):
        '''
        Vectorized version of G(z) which can deal with an array.
        '''
        return np.vectorize(lambda zz: self.cal_G(zz, err))(z)

    def get_spectral(self, x, epsilon = 0.01):
        '''
        epsilon is the broadening parameter for delta peaks.
        '''
        if self.flag != "discrete":
            return np.vectorize(self.A_x)(x)
        A_w = 0.0
        for weight, loc in zip(self.A_i, self.x_i):
            if np.abs(loc.imag) < self.tol:
                #real pole: broaden the delta peak into a Lorentzian
                A_w += (1.0 / np.pi) * weight.real * np.abs(epsilon) / ((x - loc.real)**2.0 + epsilon**2.0)
            else:
                #complex pole: already broadened, take -Im G / pi directly
                A_w += -1.0 / np.pi * (weight / (x - loc)).imag
        return A_w
|
mini_pole/mini_pole.py
ADDED
|
@@ -0,0 +1,420 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import scipy.integrate as integrate
|
|
3
|
+
from .esprit import ESPRIT
|
|
4
|
+
from .con_map import ConMapGeneric, ConMapGapless
|
|
5
|
+
from .green_func import GreenFunc
|
|
6
|
+
|
|
7
|
+
class MiniPole:
|
|
8
|
+
def __init__(self, G_w, w, n0 = "auto", n0_shift = 0, err = None, err_type = "abs", M = None, symmetry = False, G_symmetric = False, compute_const = False, plane = None, include_n0 = False, k_max = 999, ratio_max = 10):
|
|
9
|
+
'''
|
|
10
|
+
A Python program for obtaining the matrix-valued minimal pole representation.
|
|
11
|
+
|
|
12
|
+
Parameters
|
|
13
|
+
----------
|
|
14
|
+
G_w : ndarray
|
|
15
|
+
An (n_w, n_orb, n_orb) or (n_w,) array containing the Matsubara data.
|
|
16
|
+
w : ndarray
|
|
17
|
+
An (n_w,) array containing the corresponding real-valued Matsubara grid.
|
|
18
|
+
n0 : int or str, default="auto"
|
|
19
|
+
If "auto", n0 is automatically selected with an additional shift specified by n0_shift.
|
|
20
|
+
If a non-negative integer is provided, n0 is fixed at that value.
|
|
21
|
+
n0_shift : int, default=0
|
|
22
|
+
The shift applied to the automatically determined n0.
|
|
23
|
+
err : float
|
|
24
|
+
Error tolerance for calculations.
|
|
25
|
+
err_type : str, default="abs"
|
|
26
|
+
Specifies the type of error: "abs" for absolute error or "rel" for relative error.
|
|
27
|
+
M : int, optional
|
|
28
|
+
The number of poles in the final result. If not specified, the precision from the first ESPRIT is used to extract poles in the second ESPRIT.
|
|
29
|
+
symmetry : bool, default=False
|
|
30
|
+
Determines whether to preserve up-down symmetry.
|
|
31
|
+
G_symmetric : bool, default=False
|
|
32
|
+
If True, the Matsubara data will be symmetrized such that G_{ij}(z) = G_{ji}(z).
|
|
33
|
+
compute_const : bool, default=False
|
|
34
|
+
Determines whether to compute the constant term in G(z) = sum_l Al / (z - xl) + const.
|
|
35
|
+
If False, the constant term is fixed at 0.
|
|
36
|
+
plane : str, optional
|
|
37
|
+
Specifies whether to use the original z-plane or the mapped w-plane to compute pole weights.
|
|
38
|
+
include_n0 : bool, default=True
|
|
39
|
+
Determines whether to include the first n0 input points when weights are calculated in the z-plane.
|
|
40
|
+
k_max : int, default=999
|
|
41
|
+
The maximum number of contour integrals.
|
|
42
|
+
ratio_max : float, default=10
|
|
43
|
+
The maximum ratio of oscillation when automatically choosing n0.
|
|
44
|
+
|
|
45
|
+
Returns
|
|
46
|
+
-------
|
|
47
|
+
Minimal pole representation of the given data.
|
|
48
|
+
Pole weights are stored in `self.pole_weight', a numpy array of shape (M, n_orb, n_orb).
|
|
49
|
+
Shared pole locations are stored in `self.pole_location', a numpy array of shape (M,).
|
|
50
|
+
'''
|
|
51
|
+
if G_w.ndim == 1:
|
|
52
|
+
G_w = G_w.reshape(-1, 1, 1)
|
|
53
|
+
assert G_w.ndim == 3
|
|
54
|
+
assert G_w.shape[0] == w.size and G_w.shape[1] == G_w.shape[2]
|
|
55
|
+
assert w[0] >= 0.0
|
|
56
|
+
assert np.linalg.norm(np.diff(np.diff(w)), ord=np.inf) < 1.e-10
|
|
57
|
+
|
|
58
|
+
self.n_w = w.size
|
|
59
|
+
self.n_orb = G_w.shape[1]
|
|
60
|
+
if G_symmetric is True:
|
|
61
|
+
self.G_w = 0.5 * (G_w + np.transpose(G_w, axes=(0, 2, 1)))
|
|
62
|
+
else:
|
|
63
|
+
self.G_w = G_w
|
|
64
|
+
self.w = w
|
|
65
|
+
self.G_symmetric = G_symmetric
|
|
66
|
+
self.err = err
|
|
67
|
+
self.err_type = err_type
|
|
68
|
+
self.M = M
|
|
69
|
+
self.symmetry = symmetry
|
|
70
|
+
if symmetry is True and compute_const == True:
|
|
71
|
+
raise Exception("Set symmetry to be False to calculate the overall constant!")
|
|
72
|
+
self.compute_const = compute_const
|
|
73
|
+
if plane is not None:
|
|
74
|
+
self.plane = plane
|
|
75
|
+
elif self.symmetry is False:
|
|
76
|
+
self.plane = "z"
|
|
77
|
+
else:
|
|
78
|
+
self.plane = "w"
|
|
79
|
+
assert self.plane in ["z", "w"]
|
|
80
|
+
self.include_n0 = include_n0
|
|
81
|
+
self.k_max = k_max
|
|
82
|
+
self.ratio_max = ratio_max
|
|
83
|
+
|
|
84
|
+
#perform the first ESPRIT approximation to approximate Matsubara data
|
|
85
|
+
G_w_vector = self.G_w.reshape(-1, self.n_orb ** 2)
|
|
86
|
+
self.p_o = [ESPRIT(G_w_vector[:, i], self.w[0], self.w[-1], err=self.err, err_type=self.err_type, Lfactor=0.4) for i in range(self.n_orb ** 2)]
|
|
87
|
+
self.G_approx = [lambda x, idx=i: self.p_o[idx].get_value(x) for i in range(self.n_orb ** 2)]
|
|
88
|
+
idx_sigma = np.argmax([self.p_o[i].sigma for i in range(self.n_orb ** 2)])
|
|
89
|
+
self.S = self.p_o[idx_sigma].S
|
|
90
|
+
self.sigma = self.p_o[idx_sigma].sigma
|
|
91
|
+
if n0 == "auto":
|
|
92
|
+
assert isinstance(n0_shift, int) and n0_shift >= 0
|
|
93
|
+
p_o2 = [ESPRIT(G_w_vector[:, i], self.w[0], self.w[-1], err=self.err, err_type=self.err_type, Lfactor=0.5) for i in range(self.n_orb ** 2)]
|
|
94
|
+
w_cont = np.linspace(self.w[0], self.w[-1], 10 * self.w.size - 9)
|
|
95
|
+
G_L1 = [self.p_o[i].get_value(w_cont)[:-1].reshape(self.w.size - 1, 10) for i in range(self.n_orb ** 2)]
|
|
96
|
+
G_L2 = [ p_o2[i].get_value(w_cont)[:-1].reshape(self.w.size - 1, 10) for i in range(self.n_orb ** 2)]
|
|
97
|
+
self.err_max = max(max([self.p_o[i].err_max for i in range(self.n_orb ** 2)]), max([p_o2[i].err_max for i in range(self.n_orb ** 2)]))
|
|
98
|
+
G_L_diff = [np.abs(G_L2[i] - G_L1[i]).max(axis=1) for i in range(self.n_orb ** 2)]
|
|
99
|
+
ctrl_interval = [np.logical_and(G_L_diff[i][:-1] <= self.err_max, G_L_diff[i][0:-1] / G_L_diff[i][1:] < ratio_max) for i in range(self.n_orb ** 2)]
|
|
100
|
+
self.n0 = max([np.argmax(ctrl_interval[i]) for i in range(self.n_orb ** 2)]) + n0_shift
|
|
101
|
+
else:
|
|
102
|
+
assert isinstance(n0, int) and n0 >= 0
|
|
103
|
+
self.err_max = max([self.p_o[i].err_max for i in range(self.n_orb ** 2)])
|
|
104
|
+
self.n0 = n0
|
|
105
|
+
|
|
106
|
+
if self.symmetry is False:
|
|
107
|
+
#get the corresponding conformal mapping
|
|
108
|
+
w_m = 0.5 * (self.w[self.n0] + self.w[-1])
|
|
109
|
+
dw_h = 0.5 * (self.w[-1] - self.w[self.n0])
|
|
110
|
+
self.con_map = ConMapGeneric(w_m, dw_h)
|
|
111
|
+
#calculate contour integrals
|
|
112
|
+
self.cal_hk_generic(self.G_approx, k_max)
|
|
113
|
+
else:
|
|
114
|
+
#use complex poles to approximate Matsubara data in [1j * w[-1], +inf)
|
|
115
|
+
p = MiniPole(G_w, w, n0=n0, n0_shift=n0_shift, err=err, err_type=err_type, G_symmetric=G_symmetric, compute_const=compute_const, include_n0=False, k_max=k_max, ratio_max=ratio_max)
|
|
116
|
+
self.G_approx_tail = [lambda x, Al=p.pole_weight.reshape(-1, self.n_orb ** 2)[:, i], xl=p.pole_location: self.cal_G_scalar(1j * x, Al, xl) for i in range(self.n_orb ** 2)]
|
|
117
|
+
self.const = p.const
|
|
118
|
+
#get the corresponding conformal mapping
|
|
119
|
+
self.con_map = ConMapGapless(self.w[self.n0])
|
|
120
|
+
#calculate contour integrals
|
|
121
|
+
if G_symmetric is True:
|
|
122
|
+
self.cal_hk_gapless_symmetric(self.G_approx, self.G_approx_tail, k_max)
|
|
123
|
+
else:
|
|
124
|
+
self.cal_hk_gapless(self.G_approx, self.G_approx_tail, k_max)
|
|
125
|
+
|
|
126
|
+
#apply the second ESPRIT approximation to recover poles
|
|
127
|
+
self.find_poles()
|
|
128
|
+
|
|
129
|
+
def cal_hk_generic(self, G_approx, k_max = 999):
|
|
130
|
+
'''
|
|
131
|
+
Calculate the contour integrals.
|
|
132
|
+
'''
|
|
133
|
+
cutoff = self.err_max
|
|
134
|
+
err = 0.01 * cutoff
|
|
135
|
+
|
|
136
|
+
self.h_k = np.zeros((k_max, len(G_approx)), dtype=np.complex_)
|
|
137
|
+
for k in range(self.h_k.shape[0]):
|
|
138
|
+
for i in range(self.h_k.shape[1]):
|
|
139
|
+
self.h_k[k, i] = self.cal_hk_generic_indiv(G_approx[i], k, err)
|
|
140
|
+
if k >= 1:
|
|
141
|
+
cutoff_matrix = np.logical_and(np.abs(self.h_k[k]) < cutoff, np.abs(self.h_k[k - 1]) < cutoff)
|
|
142
|
+
if np.all(cutoff_matrix):
|
|
143
|
+
break
|
|
144
|
+
self.h_k = self.h_k[:(k + 1)]
|
|
145
|
+
|
|
146
|
+
def cal_hk_generic_indiv(self, G_approx, k, err):
|
|
147
|
+
if k % 2 == 0:
|
|
148
|
+
return (1.0j / np.pi) * integrate.quad(lambda x: G_approx(self.con_map.w_m + self.con_map.dw_h * np.sin(x)), -0.5 * np.pi, 0.5 * np.pi, weight="sin", wvar=k + 1, complex_func=True, epsabs=err, epsrel=err, limit=10000)[0]
|
|
149
|
+
else:
|
|
150
|
+
return (1.0 / np.pi) * integrate.quad(lambda x: G_approx(self.con_map.w_m + self.con_map.dw_h * np.sin(x)), -0.5 * np.pi, 0.5 * np.pi, weight="cos", wvar=k + 1, complex_func=True, epsabs=err, epsrel=err, limit=10000)[0]
|
|
151
|
+
|
|
152
|
+
def cal_hk_gapless_symmetric(self, G_approx_head, G_approx_tail, k_max = 999):
|
|
153
|
+
'''
|
|
154
|
+
Calculate the contour integrals.
|
|
155
|
+
'''
|
|
156
|
+
cutoff = self.err_max
|
|
157
|
+
err = 0.01 * cutoff
|
|
158
|
+
|
|
159
|
+
theta0 = np.arcsin(self.con_map.w_min / self.w[-1])
|
|
160
|
+
self.h_k = np.zeros((k_max, len(G_approx_head)), dtype=np.float_)
|
|
161
|
+
for k in range(self.h_k.shape[0]):
|
|
162
|
+
for i in range(self.h_k.shape[1]):
|
|
163
|
+
self.h_k[k, i] = self.cal_hk_gapless_symmetric_indiv(G_approx_head[i], k, err, theta0 + 1.e-12, 0.5 * np.pi) + \
|
|
164
|
+
self.cal_hk_gapless_symmetric_indiv(G_approx_tail[i], k, err, 1.e-6, theta0 - 1.e-12)
|
|
165
|
+
if k >= 1:
|
|
166
|
+
cutoff_matrix = np.logical_and(np.abs(self.h_k[k]) < cutoff, np.abs(self.h_k[k - 1]) < cutoff)
|
|
167
|
+
if np.all(cutoff_matrix):
|
|
168
|
+
break
|
|
169
|
+
self.h_k = self.h_k[:(k + 1)]
|
|
170
|
+
|
|
171
|
+
def cal_hk_gapless_symmetric_indiv(self, G_approx, k, err, theta_min, theta_max):
|
|
172
|
+
if k % 2 == 0:
|
|
173
|
+
return (-2.0 / np.pi) * integrate.quad(lambda x: G_approx(self.con_map.w_min / np.sin(x)).imag, theta_min, theta_max, weight="sin", wvar=k + 1, epsabs=err, epsrel=err, limit=10000)[0]
|
|
174
|
+
else:
|
|
175
|
+
return (+2.0 / np.pi) * integrate.quad(lambda x: G_approx(self.con_map.w_min / np.sin(x)).real, theta_min, theta_max, weight="cos", wvar=k + 1, epsabs=err, epsrel=err, limit=10000)[0]
|
|
176
|
+
|
|
177
|
+
def cal_hk_gapless(self, G_approx_head, G_approx_tail, k_max = 999):
|
|
178
|
+
'''
|
|
179
|
+
Calculate the contour integrals.
|
|
180
|
+
'''
|
|
181
|
+
cutoff = self.err_max
|
|
182
|
+
err = 0.01 * cutoff
|
|
183
|
+
|
|
184
|
+
theta0 = np.arcsin(self.con_map.w_min / self.w[-1])
|
|
185
|
+
self.h_k = np.zeros((k_max, len(G_approx_head)), dtype=np.complex_)
|
|
186
|
+
for k in range(self.h_k.shape[0]):
|
|
187
|
+
for i in range(self.n_orb):
|
|
188
|
+
for j in range(i, self.n_orb):
|
|
189
|
+
if i == j:
|
|
190
|
+
idx = i * self.n_orb + j
|
|
191
|
+
self.h_k[k, idx] = self.cal_hk_gapless_symmetric_indiv(G_approx_head[idx], k, err, theta0 + 1.e-12, 0.5 * np.pi) + \
|
|
192
|
+
self.cal_hk_gapless_symmetric_indiv(G_approx_tail[idx], k, err, 1.e-6, theta0 - 1.e-12)
|
|
193
|
+
else:
|
|
194
|
+
idx1 = i * self.n_orb + j
|
|
195
|
+
idx2 = j * self.n_orb + i
|
|
196
|
+
h1 = self.cal_hk_gapless_indiv(G_approx_head[idx1], k, err, theta0 + 1.e-12, 0.5 * np.pi) + \
|
|
197
|
+
self.cal_hk_gapless_indiv(G_approx_tail[idx1], k, err, 1.e-6, theta0 - 1.e-12)
|
|
198
|
+
h2 = self.cal_hk_gapless_indiv(G_approx_head[idx2], k, err, theta0 + 1.e-12, 0.5 * np.pi) + \
|
|
199
|
+
self.cal_hk_gapless_indiv(G_approx_tail[idx2], k, err, 1.e-6, theta0 - 1.e-12)
|
|
200
|
+
if k % 2 == 0:
|
|
201
|
+
self.h_k[k, idx1] = 1.0j / np.pi * (h1 - np.conjugate(h2))
|
|
202
|
+
self.h_k[k, idx2] = 1.0j / np.pi * (h2 - np.conjugate(h1))
|
|
203
|
+
else:
|
|
204
|
+
self.h_k[k, idx1] = 1.0 / np.pi * (h1 + np.conjugate(h2))
|
|
205
|
+
self.h_k[k, idx2] = 1.0 / np.pi * (h2 + np.conjugate(h1))
|
|
206
|
+
if k >= 1:
|
|
207
|
+
cutoff_matrix = np.logical_and(np.abs(self.h_k[k]) < cutoff, np.abs(self.h_k[k - 1]) < cutoff)
|
|
208
|
+
if np.all(cutoff_matrix):
|
|
209
|
+
break
|
|
210
|
+
self.h_k = self.h_k[:(k + 1)]
|
|
211
|
+
|
|
212
|
+
def cal_hk_gapless_indiv(self, G_approx, k, err, theta_min, theta_max):
|
|
213
|
+
if k % 2 == 0:
|
|
214
|
+
return integrate.quad(lambda x: G_approx(self.con_map.w_min / np.sin(x)), theta_min, theta_max, weight="sin", wvar=k + 1, complex_func=True, epsabs=err, epsrel=err, limit=10000)[0]
|
|
215
|
+
else:
|
|
216
|
+
return integrate.quad(lambda x: G_approx(self.con_map.w_min / np.sin(x)), theta_min, theta_max, weight="cos", wvar=k + 1, complex_func=True, epsabs=err, epsrel=err, limit=10000)[0]
|
|
217
|
+
|
|
218
|
+
def find_poles(self):
    '''
    Recover poles from the contour integrals h_k.

    Runs the second ESPRIT on self.h_k, keeps only the poles whose mapped
    images lie inside the unit disk, transforms them back to the z-plane,
    optionally estimates a constant background and refits the weights in
    the z-plane, and finally stores the poles sorted by real part in
    self.pole_weight (shape (M, n_orb, n_orb)) and self.pole_location
    (shape (M,)).
    '''
    #apply the second ESPRIT: fixed pole count when M is given, error-driven otherwise
    if self.M is None:
        self.p_f = ESPRIT(self.h_k, err=self.err_max)
    else:
        self.p_f = ESPRIT(self.h_k, M=self.M)

    #make sure all mapped poles are inside the unit disk
    idx0 = np.abs(self.p_f.gamma) < 1.0
    #transform poles from w-plane to z-plane (chain rule supplies the dz/dw factor)
    location = self.con_map.z(self.p_f.gamma[idx0])
    weight = self.p_f.omega[idx0] * self.con_map.dz(self.p_f.gamma[idx0]).reshape(-1, 1)

    if self.compute_const is False:
        self.const = 0.0
    else:
        #estimate the constant term from the mean residual on the retained tail frequencies,
        #and only keep it if it is clearly above the noise floor
        G_w_approx = self.cal_G_vector(1j * self.w[self.n0:], weight, location)
        const = (self.G_w[self.n0:] - G_w_approx.reshape(-1, self.n_orb, self.n_orb)).mean(axis=0)
        self.const = const if np.abs(const).max() > 100.0 * self.err_max else 0.0

    if self.plane == "z":
        #refit the pole weights directly in the z-plane via linear least squares
        w_tmp = self.w if self.include_n0 else self.w[self.n0:]
        G_w_tmp = self.G_w if self.include_n0 else self.G_w[self.n0:]
        if self.symmetry is False:
            w = w_tmp
            G_w = G_w_tmp
        else:
            #extend the data to negative frequencies using G(-i w) = G(i w)^dagger
            w = np.hstack((-w_tmp[::-1], w_tmp))
            G_w = np.concatenate((np.conjugate(np.transpose(G_w_tmp, axes=(0, 2, 1)))[::-1], G_w_tmp), axis=0)
        #Cauchy kernel matrix A[n, l] = 1 / (i w_n - xi_l), built by broadcasting.
        #NOTE: the previous implementation filled A column-by-column into an array of
        #dtype np.complex_, which was removed in NumPy 2.0; broadcasting yields
        #complex128 automatically and avoids the Python-level loop.
        A = 1.0 / (1j * w[:, np.newaxis] - location[np.newaxis, :])
        weight, residuals, rank, s = np.linalg.lstsq(A, (G_w - self.const).reshape(-1, self.n_orb ** 2), rcond=-1)
        self.lstsq_quality = (residuals, rank, s)

    #discard poles with negligible weights
    idx1 = np.abs(weight).max(axis=1) > self.err_max
    weight = weight[idx1]
    location = location[idx1]

    #rearrange poles so that \xi_1.real <= \xi_2.real <= ... <= \xi_M.real
    idx2 = np.argsort(location.real)
    self.pole_weight = weight[idx2].reshape(-1, self.n_orb, self.n_orb)
    self.pole_location = location[idx2]
@staticmethod
|
|
267
|
+
def cal_G_scalar(z, Al, xl):
|
|
268
|
+
G_z = 0.0
|
|
269
|
+
for i in range(xl.size):
|
|
270
|
+
G_z += Al[i] / (z - xl[i])
|
|
271
|
+
return G_z
|
|
272
|
+
|
|
273
|
+
@staticmethod
|
|
274
|
+
def cal_G_vector(z, Al, xl):
|
|
275
|
+
G_z = 0.0
|
|
276
|
+
for i in range(xl.size):
|
|
277
|
+
G_z += Al[[i]] / (z.reshape(-1, 1) - xl[i])
|
|
278
|
+
return G_z
|
|
279
|
+
|
|
280
|
+
def plot_spectrum(self, orb_list = None, w_min = -10, w_max = 10, epsilon = 0.01):
    '''
    Plot the spectral function reconstructed from the recovered poles.

    Parameters
    ----------
    orb_list : list of (int, int), optional
        Orbital index pairs (i, j) to plot; defaults to all n_orb x n_orb elements.
    w_min, w_max : float
        Real-frequency window of the plot.
    epsilon : float
        Broadening parameter forwarded to GreenFunc.get_spectral.
    '''
    import matplotlib.pyplot as plt

    w = np.linspace(w_min, w_max, 10000)
    if orb_list is None:
        orb_list = [(i, j) for i in range(self.n_orb) for j in range(self.n_orb)]
    #dynamically generate colors, line styles, and markers based on the number of curves
    num_curves = len(orb_list)
    line_styles = ['-', '-.', ':', '--'] * (num_curves // 4 + 1)
    plt.figure()
    for idx, orb in enumerate(orb_list):
        i, j = orb
        #build a discrete-pole Green's function for element (i, j) from the recovered
        #weights/locations ('F' presumably selects fermionic statistics — confirm
        #against the GreenFunc constructor) and evaluate its broadened spectrum
        gf = GreenFunc('F', 1.0, "discrete", A_i=self.pole_weight[:, i, j], x_i=self.pole_location)
        A_r = gf.get_spectral(w, epsilon=epsilon)
        plt.plot(w, A_r, linestyle=line_styles[idx], label="element (" + str(i) + ", " + str(j) + ")")
    #only draw the legend for small matrices to avoid clutter
    if self.h_k.shape[1] <= 16:
        plt.legend()
    plt.xlabel(r"$\omega$")
    plt.ylabel(r"$A(\omega)$")
    plt.show()
def check_valid(self):
    '''
    Produce a series of diagnostic plots for the whole MPM pipeline:
    the SVD of the input data, the first (intermediate) approximation,
    the contour integrals h_k and their ESPRIT approximation, the final
    pole approximation, and the pole locations in both the z-plane and
    the mapped w-plane.
    '''
    import matplotlib.pyplot as plt
    #dynamically generate colors, line styles, and markers based on the number of curves
    num_curves = self.n_orb ** 2
    line_styles = ['-', '-.', ':', '--'] * (num_curves // 4 + 1)

    #check svd of the input data: singular values vs the chosen cutoff and precision
    plt.figure()
    plt.semilogy(self.S, ".")
    plt.semilogy([0, self.S.size - 1], [self.sigma, self.sigma], color="gray", linestyle="--", label="singular value")
    plt.semilogy([0, self.S.size - 1], [self.err_max, self.err_max], color="k", label="precision")
    plt.legend()
    plt.xlabel(r"$n$")
    plt.ylabel(r"$\sigma_n$")
    plt.title("SVD of the input data")
    plt.show()

    #check the first approximation: per-element residual on the Matsubara grid
    plt.figure()
    for i in range(self.n_orb ** 2):
        row, col = i // self.n_orb, i % self.n_orb
        #in the symmetric case the constant background has to be added back per element
        G_w1 = self.G_approx[i](self.w) if self.symmetry is False else self.G_approx[i](self.w) + (self.const + np.zeros((self.n_orb, self.n_orb))).reshape(-1)[i]
        plt.semilogy(self.w, np.abs(np.squeeze(G_w1) - self.G_w[:, row, col]), linestyle=line_styles[i], label="element (" + str(row) + ", " + str(col) + ")")
    plt.semilogy([self.w[0], self.w[-1]], [self.err_max, self.err_max], color="k", label="precision")
    #only draw the legend for small matrices to avoid clutter
    if self.h_k.shape[1] <= 16:
        plt.legend()
    plt.xlabel(r"$\omega_n$")
    plt.ylabel(r"$|\hat{G}(i\omega_n) - G(i\omega_n)|$")
    plt.title("First approximation")
    plt.show()

    #check h_k
    #part 1: magnitude of the contour integrals themselves
    plt.figure()
    for i in range(self.n_orb ** 2):
        row, col = i // self.n_orb, i % self.n_orb
        plt.semilogy(np.abs(self.h_k[:, i]), '.', label="element (" + str(row) + ", " + str(col) + ")")
    plt.semilogy([0, self.h_k.shape[0] - 1], [self.err_max, self.err_max], color="k", label="precision")
    if self.h_k.shape[1] <= 16:
        plt.legend()
    plt.xlabel(r"$k$")
    plt.ylabel(r"$h_k$")
    plt.title("Contour integrals: value")
    plt.show()
    #part 2: singular-value spectrum of the second ESPRIT
    plt.figure()
    plt.semilogy(self.p_f.S, ".")
    if self.M is not None:
        plt.semilogy([0, self.p_f.S.size - 1], [self.p_f.S[self.M], self.p_f.S[self.M]], color="gray", linestyle="--", label="M poles")
    plt.semilogy([0, self.p_f.S.size - 1], [self.err_max, self.err_max], color="k", label="precision")
    plt.legend()
    plt.xlabel(r"$n$")
    plt.ylabel(r"$\sigma_n$")
    plt.title("Contour integrals: SVD")
    plt.show()
    #part 3: residual of the ESPRIT fit to h_k on the unit interval
    plt.figure()
    h_k_approx = self.p_f.get_value(np.linspace(0, 1, self.h_k.shape[0]))
    for i in range(self.n_orb ** 2):
        row, col = i // self.n_orb, i % self.n_orb
        plt.semilogy(np.abs(h_k_approx[:, i] - self.h_k[:, i]), '.', label="element (" + str(row) + ", " + str(col) + ")")
    if self.M is not None:
        plt.semilogy([0, self.h_k.shape[0] - 1], [self.p_f.S[self.M], self.p_f.S[self.M]], color="gray", linestyle="--", label="M poles")
    else:
        plt.semilogy([0, self.h_k.shape[0] - 1], [self.err_max, self.err_max], color="k", label="precision")
    if self.h_k.shape[1] <= 16:
        plt.legend()
    plt.xlabel(r"$k$")
    plt.ylabel(r"$|\hat{h}_k - h_k|$")
    plt.title("Contour integrals: approximation")
    plt.show()

    #check the final approximation: residual of the full pole representation
    plt.figure()
    G_w2 = self.cal_G_vector(1j * self.w, self.pole_weight.reshape(-1, self.n_orb ** 2), self.pole_location).reshape(-1, self.n_orb, self.n_orb) + self.const
    for i in range(self.n_orb ** 2):
        row, col = i // self.n_orb, i % self.n_orb
        plt.semilogy(self.w, np.abs(G_w2[:, row, col] - self.G_w[:, row, col]), linestyle=line_styles[i], label="element (" + str(row) + ", " + str(col) + ")")
    if self.M is not None:
        plt.semilogy([self.w[0], self.w[-1]], [self.p_f.S[self.M], self.p_f.S[self.M]], color="gray", linestyle="--", label="M poles")
    else:
        plt.semilogy([self.w[0], self.w[-1]], [self.err_max, self.err_max], color="k", label="precision")
    if self.h_k.shape[1] <= 16:
        plt.legend()
    plt.xlabel(r"$\omega_n$")
    plt.ylabel(r"$|\hat{G}(i\omega_n) - G(i\omega_n)|$")
    plt.title("Final approximation")
    plt.show()

    from matplotlib.colors import LinearSegmentedColormap
    colors = [(1, 1, 1), (0, 0, 1)] #(R, G, B) tuples for white and blue
    n_bins = 100 #Discretize the interpolation into bins
    cmap_name = "WtBu"
    cmap = LinearSegmentedColormap.from_list(cmap_name, colors, N=n_bins) #Create the colormap

    #check pole locations in the z-plane, colored by the Frobenius norm of the weight matrix
    pts = self.pole_location
    scatter = plt.scatter(pts.real, pts.imag, c=np.linalg.norm(self.pole_weight.reshape(-1, self.n_orb ** 2), axis=1), vmin=0, vmax=1, cmap=cmap)
    cbar = plt.colorbar(scatter)
    cbar.set_label('weight')
    x_max = np.abs(self.pole_location.real).max() * 1.2
    y_max = max(np.abs(self.pole_location.imag).max() * 1.2, 1.0)
    plt.xlim([-x_max, x_max])
    plt.ylim([-y_max, y_max])
    plt.xlabel(r"Real($z$)")
    plt.ylabel(r"Imag($z$)")
    plt.show()

    #check mapped pole locations: images in the w-plane should lie inside the unit circle
    theta = np.arange(1001) * 2.0 * np.pi / 1000
    pts = self.con_map.w(self.pole_location)
    plt.plot(np.cos(theta), np.sin(theta), color="tab:orange")
    scatter = plt.scatter(pts.real, pts.imag, c=np.linalg.norm(self.pole_weight.reshape(-1, self.n_orb ** 2), axis=1), vmin=0, vmax=1, cmap=cmap)
    cbar = plt.colorbar(scatter)
    cbar.set_label('weight')
    plt.xlim([-1.05, 1.05])
    plt.ylim([-1.05, 1.05])
    plt.xlabel(r"Real($w$)")
    plt.ylabel(r"Imag($w$)")
    plt.show()
@@ -0,0 +1,62 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from .esprit import ESPRIT
|
|
3
|
+
from .con_map import ConMapGeneric, ConMapGapless
|
|
4
|
+
|
|
5
|
+
class MiniPoleDLR:
    '''
    A Python program implementing the MPM-DLR algorithm.

    Starting from a discrete Lehmann representation (DLR) of a Green's
    function, it maps the DLR poles through a holomorphic mapping,
    computes contour integrals h_k, and extracts a minimal pole
    representation with ESPRIT.
    '''
    def __init__(self, Al_dlr, xl_dlr, beta, n0, nmax = None, err = None, err_type = "abs", M = None, symmetry = False, k_max=200, Lfactor = 0.4):
        '''
        Al_dlr (numpy.ndarray): DLR coefficients, either of shape (r,) or (r, n_orb, n_orb).
        xl_dlr (numpy.ndarray): DLR grid for the real frequency, an array of shape (r,).
        beta (float): Inverse temperature of the system (1/kT).
        n0 (int): Number of initial points to discard, typically in the range (0, 10).
        nmax (int): Cutoff for the Matsubara frequency when symmetry is False.
        err (float): Error tolerance for calculations.
        err_type (str): Specifies the type of error, "abs" for absolute error or "rel" for relative error.
        M (int): Specifies the number of poles to be recovered.
        symmetry (bool): Whether to impose up-down symmetry (True or False).
        k_max (int): Number of moments to be calculated.
        Lfactor (float): Ratio of L/N in the ESPRIT algorithm.

        Results: pole weights in self.pole_weight (shape (M,) for a single
        orbital, otherwise (M, n_orb, n_orb)), shared pole locations in
        self.pole_location (shape (M,)), sorted by real part.
        '''
        #make sure Al_dlr is of size (r, n_orb, n_orb)
        if Al_dlr.ndim == 1:
            Al_dlr = Al_dlr.reshape(-1, 1, 1)
        assert Al_dlr.ndim == 3
        assert Al_dlr.shape[0] == xl_dlr.size and Al_dlr.shape[1] == Al_dlr.shape[2]
        n_orb = Al_dlr.shape[1]

        # Construct holomorphic mapping
        # w_n0 is the first retained fermionic Matsubara frequency (2 n0 + 1) pi / beta
        w_n0 = (2 * n0 + 1) * np.pi / beta
        if symmetry is False:
            # NOTE(review): when nmax is None the cutoff index defaults to beta
            # itself (a float); presumably intentional so the frequency window
            # scales with temperature — confirm against the paper.
            nmax = beta if nmax is None else nmax
            w_nmax = (2 * nmax + 1) * np.pi / beta
            w_m = 0.5 * (w_n0 + w_nmax)
            dw_h = 0.5 * (w_nmax - w_n0)
            self.con_map = ConMapGeneric(w_m, dw_h)
        else:
            self.con_map = ConMapGapless(w_n0)

        # Calculate contour integral
        # map the DLR poles into the w-plane, then form h_k as power sums
        # weighted by the mapped coefficients via a Vandermonde matrix
        self.xl_p = self.con_map.w(xl_dlr)
        V = np.vander(self.xl_p, N = min(int((self.xl_p.size + 1) / Lfactor), k_max), increasing=True).T
        self.Al_p = Al_dlr.reshape(-1, n_orb ** 2) / self.con_map.dz(self.xl_p).reshape(-1, 1)
        self.h_k = V @ self.Al_p

        # Extract pole information:
        # 1) Pole weights are stored in a numpy array of shape (M,) for single-orbital systems,
        # or in an array of shape (M, n_orb, n_orb) for multi-orbital systems.
        # 2) Pole locations are stored in a numpy array of shape (M,).
        self.p = ESPRIT(self.h_k, err=err, err_type=err_type, M=M, Lfactor=Lfactor)
        #keep only poles mapped inside the unit disk and transform back to the z-plane
        idx = np.abs(self.p.gamma) < 1
        Al = self.p.omega[idx] * self.con_map.dz(self.p.gamma[idx]).reshape(-1, 1)
        if n_orb == 1:
            weight = Al[:, 0]
        else:
            weight = Al.reshape(-1, n_orb, n_orb)
        location = self.con_map.z(self.p.gamma[idx])
        #rearrange poles so that \xi_1.real <= \xi_2.real <= ... <= \xi_M.real
        idx2 = np.argsort(location.real)
        self.pole_weight = weight[idx2]
        self.pole_location = location[idx2]
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from scipy.special import ellipk
|
|
3
|
+
|
|
4
|
+
'''
|
|
5
|
+
Function tools for testing the performance of Prony Analytic Continuation.
|
|
6
|
+
'''
|
|
7
|
+
def semi_circle(w, w0 = 0.0, r = 1.0):
    '''
    Normalized semicircular spectral function centered at w0 with radius r.

    Evaluates a single scalar point; returns 0 outside the open
    interval (w0 - r, w0 + r).
    '''
    assert r > 0.0
    d = w - w0
    if np.abs(d) >= r:
        return 0.0
    return (2.0 / (np.pi * r**2.0)) * np.sqrt(r**2.0 - d**2.0)
|
17
|
+
def rectangle(w, w0 = 0.0, dw_h = 0.5):
    '''
    Normalized box (rectangle) function centered at w0 with half-width dw_h.

    Evaluates a single scalar point; the height 1 / (2 dw_h) makes the
    total area equal to one.
    '''
    assert dw_h > 0.0
    inside = np.abs(w - w0) < dw_h
    return 1.0 / (2.0 * dw_h) if inside else 0.0
|
27
|
+
def triangle(w, w0 = 0.0, dw_h = 1.0):
    '''
    Normalized triangle function centered at w0 whose base spans
    [w0 - dw_h, w0 + dw_h].

    Evaluates a single scalar point; the peak height 1 / dw_h makes the
    enclosed area equal to one.
    '''
    assert dw_h > 0.0
    dist = np.abs(w - w0)
    if dist >= dw_h:
        return 0.0
    peak = 1.0 / dw_h
    return peak * (1.0 - dist / dw_h)
|
38
|
+
def gaussian(w, mu = 0.0, sigma = 1.0):
    '''
    Normalized Gaussian density with mean mu and standard deviation sigma.
    '''
    z = (w - mu) / sigma
    norm = np.sqrt(2.0 * np.pi) * sigma
    return np.exp(-0.5 * z**2.0) / norm
|
44
|
+
def lorentzian(w, w0 = 0.0, half_width = 1.0):
    '''
    Normalized Lorentzian centered at w0 with half-width at half-maximum
    equal to half_width.
    '''
    d = w - w0
    return (half_width / np.pi) / (d**2.0 + half_width**2.0)
|
50
|
+
def bethe_lattice_dos_indiv(x, t = 1.0):
    '''
    Intermediate function of bethe_lattice_dos(w, t) for a single point:
    the semicircular Bethe-lattice density of states with band edges at
    +/- 2t.
    '''
    band_edge = 2 * t
    if np.abs(x) > band_edge:
        return 0.0
    return np.sqrt(4 * t**2.0 - x**2.0) / (2.0 * np.pi * t**2.0)
|
59
|
+
def bethe_lattice_dos(w, t = 1.0):
    '''
    Density of states for the tight-binding model on the Bethe lattice
    with hopping amplitude t; accepts scalar or array input w.
    '''
    dos = np.vectorize(bethe_lattice_dos_indiv)
    return dos(w, t)
|
65
|
+
def bethe_lattice_G(w_n, t = 1.0):
    '''
    Matsubara Green's function for the tight-binding model on the Bethe
    lattice with hopping amplitude t.

    w_n holds the (real, positive) Matsubara frequencies, i.e. the
    imaginary parts of the points i w_n where G is evaluated.
    '''
    assert np.linalg.norm(w_n.imag) == 0.0
    assert np.all(w_n > 0.0)
    root = np.sqrt(w_n**2.0 + 4.0 * t**2.0)
    return 1.0j * (w_n - root) / (2.0 * t**2.0)
|
74
|
+
def square_lattice_dos_indiv(x, t = 1.0):
    '''
    Intermediate function of square_lattice_dos(w, t) for a single point.

    Uses the complete elliptic integral of the first kind; the band
    spans [-4t, 4t] and the DOS diverges logarithmically at x = 0.
    '''
    if np.abs(x) > 4 * t:
        return 0.0
    m = 1 - (x / (4 * t))**2
    return ellipk(m) / (2.0 * np.pi**2 * t)
|
83
|
+
def square_lattice_dos(w, t = 1.0):
    '''
    Density of states for the tight-binding model on the square lattice
    with nearest-neighbor hopping t; accepts scalar or array input w.
    '''
    dos = np.vectorize(square_lattice_dos_indiv)
    return dos(w, t)
|
89
|
+
def square_lattice_G(w_n, t = 1.0):
    '''
    Matsubara Green's function for the tight-binding model on the square
    lattice with nearest-neighbor hopping t.

    w_n holds the (real) Matsubara frequencies, i.e. the imaginary parts
    of the points i w_n where G is evaluated.
    '''
    assert np.linalg.norm(w_n.imag) == 0.0
    s = 1.0 + (w_n / (4.0 * t))**2.0
    return -1.0j * ellipk(1.0 / s) / (2.0 * np.pi * t * np.sqrt(s))
|
97
|
+
def triangle_lattice_dos_indiv(x, t = 1.0, tp = 0.5):
    '''
    Intermediate function of triangle_lattice_dos(w, t, tp) which only works for a single point.

    Computes the anisotropic triangular-lattice density of states via a
    complete elliptic integral; u = t / tp is the anisotropy ratio and
    E = x / t the energy in units of t.
    '''
    assert tp != 0.0
    u = t / tp
    E = x / t
    assert u >= 0.0

    # support of the DOS in units of t; the upper edge changes form at u = 2
    # NOTE(review): the band-edge formulas are taken as given — confirm
    # against the analytic reference for the anisotropic triangular lattice.
    w_min = -4.0 - 2.0 / u
    w_max = (u + 2.0 / u if u <= 2.0 else 4.0 - 2.0 / u)

    if E < w_min or E > w_max:
        return 0.0

    # auxiliary quantities entering the elliptic-integral representation
    r = u * np.sqrt(u**2.0 - E * u + 2.0)
    p = 4.0 * r
    q = (r - u**2.0)**2.0 * (r**2.0 - 4.0 * u**2.0 + 2.0 * r * u**2.0 + u ** 4.0) / (4.0 * u**4.0)

    # choose (z0, z1) so that 0 <= z1/z0 <= 1, keeping the modulus of
    # ellipk in its valid range across the different energy regions
    if q < 0.0:
        z0 = p - q
        z1 = p
    elif q < p:
        z0 = p
        z1 = p - q
    else:
        z0 = q
        z1 = q - p

    return 1.0 / (np.pi**2.0 * tp * np.sqrt(z0)) * ellipk(z1 / z0)
|
128
|
+
def triangle_lattice_dos(w, t = 1.0, tp = 0.5):
    '''
    Density of states for the tight-binding model on the triangular
    lattice with anisotropic nearest-neighbor hoppings t and tp;
    accepts scalar or array input w.
    '''
    dos = np.vectorize(triangle_lattice_dos_indiv)
    return dos(w, t, tp)
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 lzphy
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: mini_pole
|
|
3
|
+
Version: 0.2
|
|
4
|
+
Summary: The Python code provided implements the matrix-valued version of the Minimal Pole Method (MPM) as described in arXiv:2410.14000.
|
|
5
|
+
Home-page: https://github.com/Green-Phys/MiniPole
|
|
6
|
+
Author: Lei Zhang
|
|
7
|
+
Author-email: lzphy@umich.edu
|
|
8
|
+
License: MIT
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Operating System :: OS Independent
|
|
12
|
+
Requires-Python: >=3.8
|
|
13
|
+
Description-Content-Type: text/markdown
|
|
14
|
+
License-File: LICENSE
|
|
15
|
+
Requires-Dist: numpy>=1.21.0
|
|
16
|
+
Requires-Dist: scipy>=1.7.0
|
|
17
|
+
|
|
18
|
+
# 1. MiniPole
|
|
19
|
+
The Python code provided implements the matrix-valued version of the Minimal Pole Method (MPM) as described in [arXiv:2410.14000](https://arxiv.org/abs/2410.14000), extending the scalar-valued method introduced in [Phys. Rev. B 110, 035154 (2024)](https://doi.org/10.1103/PhysRevB.110.035154).
|
|
20
|
+
|
|
21
|
+
The input of the simulation is the Matsubara data $G(i \omega_n)$ sampled on a uniform grid $\lbrace i\omega_{0}, i\omega_{1}, \cdots, i\omega_{n_{\omega}-1} \rbrace$, where $\omega_n=\frac{(2n+1)\pi}{\beta}$ for fermions and $\frac{2n\pi}{\beta}$ for bosons, and $n_{\omega}$ is the total number of sampling points.
|
|
22
|
+
|
|
23
|
+
## i) The standard MPM is performed using the following command:
|
|
24
|
+
|
|
25
|
+
**p = MiniPole(G_w, w, n0 = "auto", n0_shift = 0, err = None, err_type = "abs", M = None, symmetry = False, G_symmetric = False, compute_const = False, plane = None, include_n0 = True, k_max = 999, ratio_max = 10)**
|
|
26
|
+
|
|
27
|
+
Parameters
|
|
28
|
+
----------
|
|
29
|
+
1. G_w : ndarray
|
|
30
|
+
An (n_w, n_orb, n_orb) or (n_w,) array containing the Matsubara data.
|
|
31
|
+
2. w : ndarray
|
|
32
|
+
An (n_w,) array containing the corresponding real-valued Matsubara grid.
|
|
33
|
+
3. n0 : int or str, default="auto"
|
|
34
|
+
If "auto", n0 is automatically selected with an additional shift specified by n0_shift.
|
|
35
|
+
If a non-negative integer is provided, n0 is fixed at that value.
|
|
36
|
+
4. n0_shift : int, default=0
|
|
37
|
+
The shift applied to the automatically determined n0.
|
|
38
|
+
5. err : float
|
|
39
|
+
Error tolerance for calculations.
|
|
40
|
+
6. err_type : str, default="abs"
|
|
41
|
+
Specifies the type of error: "abs" for absolute error or "rel" for relative error.
|
|
42
|
+
7. M : int, optional
|
|
43
|
+
The number of poles in the final result. If not specified, the precision from the first ESPRIT is used to extract poles in the second ESPRIT.
|
|
44
|
+
8. symmetry : bool, default=False
|
|
45
|
+
Determines whether to preserve up-down symmetry.
|
|
46
|
+
9. G_symmetric : bool, default=False
|
|
47
|
+
If True, the Matsubara data will be symmetrized such that G_{ij}(z) = G_{ji}(z).
|
|
48
|
+
10. compute_const : bool, default=False
|
|
49
|
+
Determines whether to compute the constant term in G(z) = sum_l Al / (z - xl) + const.
|
|
50
|
+
If False, the constant term is fixed at 0.
|
|
51
|
+
11. plane : str, optional
|
|
52
|
+
Specifies whether to use the original z-plane or the mapped w-plane to compute pole weights.
|
|
53
|
+
12. include_n0 : bool, default=True
|
|
54
|
+
Determines whether to include the first n0 input points when weights are calculated in the z-plane.
|
|
55
|
+
13. k_max : int, default=999
|
|
56
|
+
The maximum number of contour integrals.
|
|
57
|
+
14. ratio_max : float, default=10
|
|
58
|
+
The maximum ratio of oscillation when automatically choosing n0.
|
|
59
|
+
|
|
60
|
+
Returns
|
|
61
|
+
-------
|
|
62
|
+
Minimal pole representation of the given data.
|
|
63
|
+
Pole weights are stored in p.pole_weight, a numpy array of shape (M, n_orb, n_orb).
|
|
64
|
+
Shared pole locations are stored in p.pole_location, a numpy array of shape (M,).
|
|
65
|
+
|
|
66
|
+
## ii) The MPM-DLR algorithm is performed using the following command:
|
|
67
|
+
|
|
68
|
+
**p = MiniPoleDLR(Al_dlr, xl_dlr, beta, n0, nmax = None, err = None, err_type = "abs", M = None, symmetry = False, k_max=200, Lfactor = 0.4)**
|
|
69
|
+
|
|
70
|
+
Parameters
|
|
71
|
+
----------
|
|
72
|
+
1. Al_dlr (numpy.ndarray): DLR coefficients, either of shape (r,) or (r, n_orb, n_orb).
|
|
73
|
+
2. xl_dlr (numpy.ndarray): DLR grid for the real frequency, an array of shape (r,).
|
|
74
|
+
3. beta (float): Inverse temperature of the system (1/kT).
|
|
75
|
+
4. n0 (int): Number of initial points to discard, typically in the range (0, 10).
|
|
76
|
+
5. nmax (int): Cutoff for the Matsubara frequency when symmetry is False.
|
|
77
|
+
6. err (float): Error tolerance for calculations.
|
|
78
|
+
7. err_type (str): Specifies the type of error, "abs" for absolute error or "rel" for relative error.
|
|
79
|
+
8. M (int): Specifies the number of poles to be recovered.
|
|
80
|
+
9. symmetry (bool): Whether to impose up-down symmetry (True or False).
|
|
81
|
+
10. k_max (int): Number of moments to be calculated.
|
|
82
|
+
11. Lfactor (float): Ratio of L/N in the ESPRIT algorithm.
|
|
83
|
+
|
|
84
|
+
Returns
|
|
85
|
+
-------
|
|
86
|
+
Minimal pole representation of the given data.
|
|
87
|
+
Pole weights are stored in p.pole_weight, a numpy array of shape (M, n_orb, n_orb).
|
|
88
|
+
Shared pole locations are stored in p.pole_location, a numpy array of shape (M,).
|
|
89
|
+
|
|
90
|
+
# 2. Examples
|
|
91
|
+
|
|
92
|
+
The scripts in the *examples* folder demonstrate the usage of MPM and MPM-DLR.
|
|
93
|
+
|
|
94
|
+
## i) MPM-DLR Algorithm
|
|
95
|
+
|
|
96
|
+
The *examples/MPM_DLR* folder contains scripts to recover the band structure of Si, as shown in the middle panel of Fig. 8 in [arXiv:2410.14000](https://arxiv.org/abs/2410.14000).
|
|
97
|
+
|
|
98
|
+
### Steps:
|
|
99
|
+
|
|
100
|
+
a) Download the input data file [Si_dlr.h5](https://drive.google.com/file/d/1_bNvbgOHewiujHYEcf-CCpGxlZP9cRw_/view?usp=drive_link) to the *examples/MPM_DLR/* directory.
|
|
101
|
+
|
|
102
|
+
b) Obtain the recovered poles by running **python3 cal_band_dlr.py --obs=`<option>`**, where **`<option>`** can be "S" (self-energy), "Gii" (scalar-valued Green's function), or "G" (matrix-valued Green's function).
|
|
103
|
+
|
|
104
|
+
c) Plot the band structure by running **python3 plt_band_dlr.py --obs=`<option>`**.
|
|
105
|
+
|
|
106
|
+
### Note:
|
|
107
|
+
|
|
108
|
+
a) Reference runtime on a single core of a laptop (using the M1 Max Apple chip as an example): 13 seconds for "Gii" and 160 seconds for both "G" and "S".
|
|
109
|
+
|
|
110
|
+
b) Parallel computation is supported in **cal_band_dlr.py** to speed up the process on multiple cores. Use the following command: **mpirun -n `<num_cores>` python3 cal_band_dlr.py --obs=`<option>`**, where **`<num_cores>`** is the number of cores and **`<option>`** is "S," "Gii," or "G".
|
|
111
|
+
|
|
112
|
+
c) Full Parameters for **cal_band_dlr.py**:
|
|
113
|
+
|
|
114
|
+
- `--obs` (str): Observation type used in the script. Default is `"S"`.
|
|
115
|
+
- `--n0` (int): Parameter $n_0$ as described in [arXiv:2410.14000](https://arxiv.org/abs/2410.14000).
|
|
116
|
+
- `--err` (float): Error tolerance for computations. Default is `1.e-10`.
|
|
117
|
+
- `--symmetry` (bool): Specifies whether to preserve up-down symmetry in calculations.
|
|
118
|
+
|
|
119
|
+
d) Full Parameters for **plt_band_dlr.py**:
|
|
120
|
+
|
|
121
|
+
- `--obs` (str): Observation type used in the script. Default is `"S"`.
|
|
122
|
+
- `--w_min` (float): Lower bound of the real frequency in eV. Default is `-12`.
|
|
123
|
+
- `--w_max` (float): Upper bound of the real frequency in eV. Default is `12`.
|
|
124
|
+
- `--n_w` (int): Number of frequencies between `w_min` and `w_max`. Default is `200`.
|
|
125
|
+
- `--eta` (float): Broadening parameter. Default is `0.005`.
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
mini_pole/__init__.py,sha256=mC5Npdatb7XBt_fcvpiHu2HBQozTdkIYE6OygQI_74E,125
|
|
2
|
+
mini_pole/con_map.py,sha256=TfrYc713gR0P9-zWuZ2m8f4r_0bPHd7dFewRysoNPxk,4629
|
|
3
|
+
mini_pole/esprit.py,sha256=8yijPCwXax-RTgUDzpqhx5hW3XdMP18JI1VH_BaKOzI,6458
|
|
4
|
+
mini_pole/green_func.py,sha256=SqeUveDelETGVFZDqFahhbTMS2-HjCV_6KPvk9VT-Jc,4292
|
|
5
|
+
mini_pole/mini_pole.py,sha256=LxFYyINjLQ4Ncv2LwYTYkUYbaXBFVFbNvq9E9ZYa5iE,21704
|
|
6
|
+
mini_pole/mini_pole_dlr.py,sha256=AExes9x_2_yNNNRmdqdXG-f07dvxXT1so6_xMRWO1ag,3090
|
|
7
|
+
mini_pole/spectrum_example.py,sha256=TM_hCyi8BZaSYxMBQHfqdThDKCJJoNlZZlAODuMl9Kc,4240
|
|
8
|
+
mini_pole-0.2.dist-info/LICENSE,sha256=fX-FwYOyRqGGw9goJuT-s31vziK6_uKlEaYoPqc-NZE,1062
|
|
9
|
+
mini_pole-0.2.dist-info/METADATA,sha256=EoP_oa3l20E8i9gHP1hBkKXLhOReDWIGfJjRZbd5Scc,6945
|
|
10
|
+
mini_pole-0.2.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
|
|
11
|
+
mini_pole-0.2.dist-info/top_level.txt,sha256=5dwIWQGLxZqDey12gK5BkbMMNGrykqXAcBWf0Cx48Ds,10
|
|
12
|
+
mini_pole-0.2.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
mini_pole
|