tequila-basic 1.9.8-py3-none-any.whl → 1.9.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tequila/__init__.py +29 -14
- tequila/apps/__init__.py +14 -5
- tequila/apps/_unary_state_prep_impl.py +145 -112
- tequila/apps/adapt/__init__.py +9 -1
- tequila/apps/adapt/adapt.py +154 -113
- tequila/apps/krylov/__init__.py +1 -1
- tequila/apps/krylov/krylov.py +23 -21
- tequila/apps/robustness/helpers.py +10 -6
- tequila/apps/robustness/interval.py +238 -156
- tequila/apps/unary_state_prep.py +29 -23
- tequila/autograd_imports.py +8 -5
- tequila/circuit/__init__.py +2 -1
- tequila/circuit/_gates_impl.py +135 -67
- tequila/circuit/circuit.py +177 -88
- tequila/circuit/compiler.py +114 -105
- tequila/circuit/gates.py +288 -120
- tequila/circuit/gradient.py +35 -23
- tequila/circuit/noise.py +83 -74
- tequila/circuit/postselection.py +120 -0
- tequila/circuit/pyzx.py +10 -6
- tequila/circuit/qasm.py +201 -83
- tequila/circuit/qpic.py +63 -61
- tequila/grouping/binary_rep.py +148 -146
- tequila/grouping/binary_utils.py +84 -75
- tequila/grouping/compile_groups.py +334 -230
- tequila/grouping/ev_utils.py +77 -41
- tequila/grouping/fermionic_functions.py +383 -308
- tequila/grouping/fermionic_methods.py +170 -123
- tequila/grouping/overlapping_methods.py +69 -52
- tequila/hamiltonian/paulis.py +12 -13
- tequila/hamiltonian/paulistring.py +1 -1
- tequila/hamiltonian/qubit_hamiltonian.py +45 -35
- tequila/ml/__init__.py +1 -0
- tequila/ml/interface_torch.py +19 -16
- tequila/ml/ml_api.py +11 -10
- tequila/ml/utils_ml.py +12 -11
- tequila/objective/__init__.py +8 -3
- tequila/objective/braket.py +55 -47
- tequila/objective/objective.py +91 -56
- tequila/objective/qtensor.py +36 -27
- tequila/optimizers/__init__.py +31 -23
- tequila/optimizers/_containers.py +11 -7
- tequila/optimizers/optimizer_base.py +111 -83
- tequila/optimizers/optimizer_gd.py +258 -231
- tequila/optimizers/optimizer_gpyopt.py +56 -42
- tequila/optimizers/optimizer_scipy.py +157 -112
- tequila/quantumchemistry/__init__.py +66 -38
- tequila/quantumchemistry/chemistry_tools.py +394 -203
- tequila/quantumchemistry/encodings.py +121 -13
- tequila/quantumchemistry/madness_interface.py +170 -96
- tequila/quantumchemistry/orbital_optimizer.py +86 -40
- tequila/quantumchemistry/psi4_interface.py +166 -97
- tequila/quantumchemistry/pyscf_interface.py +70 -23
- tequila/quantumchemistry/qc_base.py +866 -414
- tequila/simulators/__init__.py +0 -3
- tequila/simulators/simulator_api.py +258 -106
- tequila/simulators/simulator_aqt.py +102 -0
- tequila/simulators/simulator_base.py +156 -55
- tequila/simulators/simulator_cirq.py +58 -42
- tequila/simulators/simulator_cudaq.py +600 -0
- tequila/simulators/simulator_ddsim.py +390 -0
- tequila/simulators/simulator_mqp.py +30 -0
- tequila/simulators/simulator_pyquil.py +190 -171
- tequila/simulators/simulator_qibo.py +95 -87
- tequila/simulators/simulator_qiskit.py +124 -114
- tequila/simulators/simulator_qlm.py +52 -26
- tequila/simulators/simulator_qulacs.py +85 -59
- tequila/simulators/simulator_spex.py +464 -0
- tequila/simulators/simulator_symbolic.py +6 -5
- tequila/simulators/test_spex_simulator.py +208 -0
- tequila/tools/convenience.py +4 -4
- tequila/tools/qng.py +72 -64
- tequila/tools/random_generators.py +38 -34
- tequila/utils/bitstrings.py +13 -7
- tequila/utils/exceptions.py +19 -5
- tequila/utils/joined_transformation.py +8 -10
- tequila/utils/keymap.py +0 -5
- tequila/utils/misc.py +6 -4
- tequila/version.py +1 -1
- tequila/wavefunction/qubit_wavefunction.py +52 -30
- {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info}/METADATA +23 -17
- tequila_basic-1.9.10.dist-info/RECORD +93 -0
- {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info}/WHEEL +1 -1
- tequila_basic-1.9.8.dist-info/RECORD +0 -86
- {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info/licenses}/LICENSE +0 -0
- {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info}/top_level.txt +0 -0
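Beyond formatting and chemistry updates, the file list above shows five new simulator modules (aqt, cudaq, ddsim, mqp, spex) and a new circuit/postselection.py. A minimal sketch of how one of the new backends would be selected through the existing tequila interface; the backend key "spex" is only a guess derived from the module name, so check show_available_simulators() for the names this release actually registers:

    import tequila as tq

    # Small test circuit built with the standard gate API.
    circuit = tq.gates.H(0) + tq.gates.CNOT(0, 1)

    # Prints the simulator backends detected in this installation.
    tq.show_available_simulators()

    # "spex" is assumed from simulator_spex.py; substitute a key from the listing above.
    wfn = tq.simulate(circuit, backend="spex")
    print(wfn)

The expanded hunks below all come from a single module of the grouping subpackage.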
tequila/grouping/fermionic_methods.py

@@ -13,6 +13,7 @@ from tequila import TequilaException
 from shutil import rmtree
 import logging
 
+
 def get_psi(h_ferm, mol_name, n_elec, n_qubit, trunc, trunc_perc, get_fci):
     if get_fci:
         _, psis_fci = get_wavefunction(jordan_wigner(h_ferm), "fci", mol_name, n_elec)
@@ -25,8 +26,9 @@ def get_psi(h_ferm, mol_name, n_elec, n_qubit, trunc, trunc_perc, get_fci):
     else:
         return psi_appr
 
+
 def do_fff(h_ferm, n_elec, options=None, restart=False, metric_estim=True):
-
+    """
     Main function for Fluid Fermionic Fragments methods.
     Parameters
     ----------
@@ -55,28 +57,44 @@ def do_fff(h_ferm, n_elec, options=None, restart=False, metric_estim=True):
     -------
     Optimized fragment operators in FermionOperator type.
     Unitary operators (orbtial rotations) diagonalizing each fragment.
-
+    """
+
     def process_options(options):
-        mol_name, fff_method, n_iter, calc_type, trunc_perc, mix, fff_thresh =
+        mol_name, fff_method, n_iter, calc_type, trunc_perc, mix, fff_thresh = "null", "r2", 5, "lr", 100.0, 0.0, 1e-4
         if options is not None:
-            if "mol_name" in options:
-
-            if "
-
-            if "
-
-            if "
+            if "mol_name" in options:
+                mol_name = options["mol_name"]
+            if "fff_method" in options:
+                fff_method = options["fff_method"]
+            if "n_iter" in options:
+                n_iter = options["n_iter"]
+            if "calc_type" in options:
+                calc_type = options["calc_type"]
+            if "trunc_perc" in options:
+                trunc_perc = options["trunc_perc"]
+            if "mix" in options:
+                mix = options["mix"]
+            if "fff_thresh" in options:
+                fff_thresh = options["fff_thresh"]
         return mol_name, fff_method, n_iter, calc_type, trunc_perc, mix, fff_thresh
 
     def check_restart(restart, mol_name):
         if mol_name == "null":
-            logging.warning(
-
-
-
+            logging.warning(
+                "Saving restart files to SAVE/null/ specify mol_name='desired_path_name' to save in SAVE/desired_path_name."
+            )
+            logging.warning(
+                "To use these restart files, run mv SAVE/null SAVE/desired_path_name, then run with options={mol_name:'desired_path_name'}."
+            )
+            print(
+                "Warning: Saving restart files to SAVE/null/ specify mol_name='desired_path_name' to save in SAVE/desired_path_name."
+            )
+            print(
+                "Warning: To use these restart files, run mv SAVE/null SAVE/desired_path_name, then run with options={mol_name:'desired_path_name'}."
+            )
             rmtree("SAVE/" + mol_name.lower() + "/", ignore_errors=True)
         else:
-            if not(restart):
+            if not (restart):
                 try:
                     os.remove("SAVE/" + mol_name.lower() + "/ev_dict.pkl")
                 except OSError:
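For orientation, the defaults spelled out in this hunk mean callers only pass the keys they want to override; anything omitted falls back to mol_name="null", fff_method="r2", n_iter=5, calc_type="lr", trunc_perc=100.0, mix=0.0 and fff_thresh=1e-4. A hedged sketch of such an options dict (the surrounding Hamiltonian setup is illustrative, not part of this diff):

    # Illustrative: h_ferm is assumed to be the fermionic Hamiltonian (an openfermion
    # FermionOperator) and n_elec its electron count, both from the caller's own setup.
    options = {
        "mol_name": "my_molecule",  # restart files are written under SAVE/my_molecule/
        "fff_method": "r2",         # accepted values: "full", "r1", "r2" (see check_method)
        "n_iter": 5,                # number of F3 repartitioning iterations
        "fff_thresh": 1e-4,         # only fragments with variance above this are repartitioned
    }
    do_fff(h_ferm, n_elec, options=options, restart=False, metric_estim=True)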
@@ -103,26 +121,29 @@ def do_fff(h_ferm, n_elec, options=None, restart=False, metric_estim=True):
                 var_new.append(None)
                 new_tbts.append(None)
             else:
-                new_all_ops.append(all_OPS[i+1])
-                var_new.append(vars_appr[i+1])
+                new_all_ops.append(all_OPS[i + 1])
+                var_new.append(vars_appr[i + 1])
                 new_tbts.append(tbts[i])
 
         for idx, i in enumerate(apply_fff_to):
             new_all_ops[i + 1] = new_ops_fff[idx]
             new_tbts[i] = new_tbts_fff[idx]
-            var_new[i + 1] = var_new_fff[idx+1]
+            var_new[i + 1] = var_new_fff[idx + 1]
 
         return new_all_ops, np.array(new_tbts), np.array(var_new)
 
     mol_name, fff_method, n_iter, calc_type, trunc_perc, mix, fff_thresh = process_options(options)
     check_restart(restart, mol_name)
 
-    if trunc_perc < 100
+    if trunc_perc < 100.0:
         trunc = True
-        if mix < 1e-8:
+        if mix < 1e-8:
+            mix = 1e-3
     else:
         trunc = False
-    h_ferm, obt, tbt, n_qubit, all_OPS, U_OPS, tbts, cartan_tbts = get_init_ops(
+    h_ferm, obt, tbt, n_qubit, all_OPS, U_OPS, tbts, cartan_tbts = get_init_ops(
+        h_ferm, mol_name, calc_type, spin_orb=False
+    )
     if metric_estim:
         psi_fci, psi_appr = get_psi(h_ferm, mol_name, n_elec, n_qubit, trunc, trunc_perc, metric_estim)
         print("===================================================")
@@ -132,12 +153,13 @@ def do_fff(h_ferm, n_elec, options=None, restart=False, metric_estim=True):
     else:
         psi_appr = get_psi(h_ferm, mol_name, n_elec, n_qubit, trunc, trunc_perc, False)
 
-
     print("===================================================")
     print("Getting approximate variances")
     _, vars_appr = compute_ev_var_all_ops(psi_appr, n_qubit, all_OPS, trunc=trunc)
     apply_fff_to = np.where(vars_appr[1:] > fff_thresh)[0]
-    all_ops_fff, uops_fff, cartan_tbts_fff, tbts_fff, vars_appr_fff = get_fff_obj(
+    all_ops_fff, uops_fff, cartan_tbts_fff, tbts_fff, vars_appr_fff = get_fff_obj(
+        all_OPS, U_OPS, cartan_tbts, tbts, vars_appr, apply_fff_to
+    )
     print("Applying F3 to {} fragments out of {}".format(len(vars_appr_fff), len(vars_appr)))
     print("===================================================")
 
@@ -157,7 +179,9 @@ def do_fff(h_ferm, n_elec, options=None, restart=False, metric_estim=True):
 
     fff_var = fff_aux(n_iter, n_qubit, O_t.shape[0], O_t.shape[1], O_t, CovOO, Cov0, Covk, uops_fff, mix)
 
-    new_obt, new_tbts_fff, meas_alloc, var_new_fff = fff_multi_iter(
+    new_obt, new_tbts_fff, meas_alloc, var_new_fff = fff_multi_iter(
+        obt, tbts_fff, psi_appr, vars_appr_fff, fff_var, fff_method
+    )
     new_all_ops, new_tbts, var_new = reorganize_fff_obj(new_obt, new_tbts_fff, var_new_fff, all_OPS, tbts, vars_appr)
 
     print("Allocating measurements")
@@ -171,26 +195,27 @@ def do_fff(h_ferm, n_elec, options=None, restart=False, metric_estim=True):
     all_uops = [uop_oe] + [U_OPS[i] for i in range(len(U_OPS))]
 
     new_c_obt = np.einsum("pa, qb, pq", uop_oe, uop_oe, new_obt)
-    new_c_tbts = np.einsum(
+    new_c_tbts = np.einsum("ipa, iqb, irc, isd, ipqrs -> iabcd", U_OPS, U_OPS, U_OPS, U_OPS, new_tbts)
     return new_all_ops, np.array(all_uops), new_c_obt, new_c_tbts, meas_alloc
 
+
 def do_svd(h_ferm, n_elec):
     spin_orb = False
-    obtb = ferm.get_obt_tbt(h_ferm, spin_orb
+    obtb = ferm.get_obt_tbt(h_ferm, spin_orb=spin_orb)
     obt = obtb[0]
     tbt_ham_opt = obtb[1]
     n_qubit = ferm.qubit_number(h_ferm)
-    if spin_orb
+    if not spin_orb:
         obt = ferm.obt_orb_to_so(obt)
 
     n = obt.shape[0]
     CARTAN_TBTS_tmp, TBTS_tmp, OPS_tmp, U_OPS_tmp = ferm.lr_decomp(tbt_ham_opt, spin_orb=spin_orb)
-    if spin_orb
+    if not spin_orb:
         U_OPS = ferm.convert_u_to_so(U_OPS_tmp)
         tbts = np.zeros([len(OPS_tmp), n, n, n, n])
         cartan_tbts = np.zeros([len(OPS_tmp), n, n, n, n])
         for i in range(len(OPS_tmp)):
-            tbts[i
+            tbts[i, :, :, :, :] = ferm.tbt_orb_to_so(TBTS_tmp[i, :, :, :, :])
             cartan_tbts[i, :, :, :, :] = ferm.tbt_orb_to_so(CARTAN_TBTS_tmp[i, :, :, :, :])
     else:
         U_OPS = U_OPS_tmp
@@ -209,24 +234,25 @@ def do_svd(h_ferm, n_elec):
     print("Allocating measurements")
     _, vars_appr = compute_ev_var_all_ops(psis_appr[0], n_qubit, all_OPS, trunc=False)
 
-    meas_alloc = ferm.compute_meas_alloc(vars_appr, obt, tbts, n_qubit, mix
+    meas_alloc = ferm.compute_meas_alloc(vars_appr, obt, tbts, n_qubit, mix=0.0)
 
     return all_uops, cartan_obt, cartan_tbts, meas_alloc
 
 
-def get_fermion_wise(H, U, qubit_list
-
+def get_fermion_wise(H, U, qubit_list=[]):
+    """
     Return z_form and orbital rotations over qubits at qubit_list
-
+    """
 
-    H = ferm.cartan_tbt_to_ferm(H, spin_orb
+    H = ferm.cartan_tbt_to_ferm(H, spin_orb=True)
     z_form = QubitHamiltonian(jordan_wigner(H))
 
-    circuit = ferm.get_orb_rot(U, qubit_list=qubit_list, tol
+    circuit = ferm.get_orb_rot(U, qubit_list=qubit_list, tol=1e-12)
     return [z_form, circuit]
 
+
 def get_init_ops(h_ferm, mol_name, calc_type, spin_orb, save=True):
-
+    """
     Parameters
     ----------
     mol_name -
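As a usage note on the hunk above: get_fermion_wise now defaults qubit_list to an empty list and, per its docstring, returns the Pauli-Z form of a Cartan fragment together with the orbital-rotation circuit that diagonalizes it. A hedged sketch of consuming that pair; the indexing into cartan_tbts and U_OPS is illustrative, not taken from this diff:

    # Illustrative: cartan_tbts[k] and U_OPS[k] are assumed to come from get_init_ops / do_svd.
    z_form, rotation = get_fermion_wise(cartan_tbts[k], U_OPS[k])
    # z_form   : QubitHamiltonian containing only Pauli-Z terms in the rotated frame
    # rotation : tequila circuit implementing the corresponding orbital rotation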
@@ -246,30 +272,30 @@ def get_init_ops(h_ferm, mol_name, calc_type, spin_orb, save=True):
     U_OPS Orbital rotations
     tbts LR Decomposition of two body integrals
     cartan_tbts Polynomial functions of Pauli Z under qubit fermion mappings
-
+    """
     path = "SAVE/" + mol_name + "/"
     if os.path.isfile(path + "tensor_terms.pkl"):
         print("Using saved Hamiltonian from {}. Run with a different mol_name if this is not desired.".format(path))
-        with open(path + "tensor_terms.pkl",
+        with open(path + "tensor_terms.pkl", "rb") as file:
             INIT = pickle.load(file)
         obt = INIT[0]
         tbt_ham_opt = INIT[1]
-        with open(path + "ham.pkl",
+        with open(path + "ham.pkl", "rb") as file:
             h_ferm = pickle.load(file)
     else:
-        obtb = ferm.get_obt_tbt(h_ferm, spin_orb
+        obtb = ferm.get_obt_tbt(h_ferm, spin_orb=spin_orb)
         obt = obtb[0]
         tbt_ham_opt = obtb[1]
         if save:
             Path(path).mkdir(exist_ok=True, parents=True)
-            with open(path + "tensor_terms.pkl",
+            with open(path + "tensor_terms.pkl", "wb") as file:
                 pickle.dump([obt, tbt_ham_opt], file)
-            with open(path + "ham.pkl",
+            with open(path + "ham.pkl", "wb") as file:
                 pickle.dump(h_ferm, file)
 
     n_qubit = ferm.qubit_number(h_ferm)
 
-    if spin_orb
+    if not spin_orb:
         obt = ferm.obt_orb_to_so(obt)
         tbt = ferm.tbt_orb_to_so(tbt_ham_opt)
     else:
@@ -278,8 +304,12 @@ def get_init_ops(h_ferm, mol_name, calc_type, spin_orb, save=True):
 
     if calc_type.lower() == "lr":
         if os.path.isfile(path + "lr.pkl"):
-            print(
-
+            print(
+                "Using saved LR decomposition saved in {}. Run with a different mol_name if this is not desired.".format(
+                    path
+                )
+            )
+            with open(path + "lr.pkl", "rb") as file:
                 INIT = pickle.load(file)
             CARTAN_TBTS_tmp = INIT[0]
             TBTS_tmp = INIT[1]
@@ -287,16 +317,16 @@ def get_init_ops(h_ferm, mol_name, calc_type, spin_orb, save=True):
         else:
             CARTAN_TBTS_tmp, TBTS_tmp, OPS_tmp, U_OPS_tmp = ferm.lr_decomp(tbt_ham_opt, spin_orb=spin_orb)
             if save:
-                with open(path + "lr.pkl",
+                with open(path + "lr.pkl", "wb") as file:
                     pickle.dump([CARTAN_TBTS_tmp, TBTS_tmp, U_OPS_tmp], file)
         OPS = ferm.convert_tbts_to_frags(TBTS_tmp, spin_orb)
 
-    if spin_orb
+    if not spin_orb:
         U_OPS = ferm.convert_u_to_so(U_OPS_tmp)
         tbts = np.zeros([len(OPS), n, n, n, n])
         cartan_tbts = np.zeros([len(OPS), n, n, n, n])
         for i in range(len(OPS)):
-            tbts[i
+            tbts[i, :, :, :, :] = ferm.tbt_orb_to_so(TBTS_tmp[i, :, :, :, :])
             cartan_tbts[i, :, :, :, :] = ferm.tbt_orb_to_so(CARTAN_TBTS_tmp[i, :, :, :, :])
 
     else:
@@ -309,8 +339,9 @@ def get_init_ops(h_ferm, mol_name, calc_type, spin_orb, save=True):
         all_OPS.append(OPS[i])
     return h_ferm, obt, tbt, n_qubit, all_OPS, U_OPS, tbts, cartan_tbts
 
+
 def get_wavefunction(Hq, wf_type, mol_name, n_elec, N=1, save=True):
-
+    """
     Parameters
     ----------
     h_ferm -
@@ -326,15 +357,16 @@ def get_wavefunction(Hq, wf_type, mol_name, n_elec, N=1, save=True):
     -------
     energies Eigenenergies
     psi Eigenstates
-
+    """
     n_qubits = count_qubits(Hq)
     if wf_type.lower() == "fci":
-
+        return get_fci_states(Hq, mol_name, n_elec, n_qubits, N=N, save=save)
     elif wf_type.lower() == "cisd":
-
+        return get_cisd_states(Hq, mol_name, n_elec, n_qubits, N=N, save=save)
+
 
 def get_fci_states(Hq, mol_name, n_elec, n_qubits, N=1, save=True):
-
+    """
     Parameters
     ----------
     Hq -
@@ -350,12 +382,12 @@ def get_fci_states(Hq, mol_name, n_elec, n_qubits, N=1, save=True):
     -------
     e_fci Energy of FCI ground state
     psi_fci Wavefunction of FCI ground state
-
+    """
 
     path = "SAVE/" + mol_name.lower() + "/"
     if os.path.isfile(path + "psi_fci.pkl"):
         print("Using saved psi_fci in {}. Run with a different mol_name if this is not desired.".format(path))
-        with open(path + "psi_fci.pkl",
+        with open(path + "psi_fci.pkl", "rb") as file:
             INIT = pickle.load(file)
         e_fci = INIT[0]
         psi_fci = INIT[1]
@@ -371,26 +403,27 @@ def get_fci_states(Hq, mol_name, n_elec, n_qubits, N=1, save=True):
     size_H = sparse_H.get_shape()[0]
     if M >= size_H - 1:
         M = size_H - 2
-    w,v = sp.sparse.linalg.eigsh(sparse_H, k
+    w, v = sp.sparse.linalg.eigsh(sparse_H, k=max(10, M), which="SA")
     srt_arg = np.argsort(w)
     w = w[srt_arg]
     v = v[:, srt_arg]
     values = []
     vectors = []
     for i in range(len(w)):
-        Nel = expectation(get_sparse_operator(Nop, n_qubits), v[:,i])
+        Nel = expectation(get_sparse_operator(Nop, n_qubits), v[:, i])
         if np.abs(Nel - n_elec) < 1e-6:
             values.append(w[i])
-            vectors.append(v[:,i])
-        if len(values) == N or i == len(w)-1:
+            vectors.append(v[:, i])
+        if len(values) == N or i == len(w) - 1:
             if save:
                 Path(path).mkdir(exist_ok=True)
-                with open(path + "psi_fci.pkl",
+                with open(path + "psi_fci.pkl", "wb") as file:
                     pickle.dump([values, vectors], file)
             return values, vectors
 
+
 def get_cisd_states(Hq, mol_name, n_elec, n_qubits, N=1, save=True):
-
+    """
     Parameters
     ----------
     Hq -
@@ -406,11 +439,11 @@ def get_cisd_states(Hq, mol_name, n_elec, n_qubits, N=1, save=True):
     -------
     e_cisd Energy of CISD ground state
     psi_cisd Wavefunction of CISD ground state
-
+    """
     path = "SAVE/" + mol_name.lower() + "/"
     if os.path.isfile(path + "psi_cisd.pkl"):
         print("Using saved psi_cisd in {}. Run with a different mol_name if this is not desired.".format(path))
-        with open(path + "psi_cisd.pkl",
+        with open(path + "psi_cisd.pkl", "rb") as file:
             INIT = pickle.load(file)
         e_cisd = INIT[0]
         psi_cisd = INIT[1]
@@ -423,7 +456,7 @@ def get_cisd_states(Hq, mol_name, n_elec, n_qubits, N=1, save=True):
         M = size_H - 1
     else:
         M = N
-    w,v = sp.sparse.linalg.eigsh(H_mat_cisd, k
+    w, v = sp.sparse.linalg.eigsh(H_mat_cisd, k=M, which="SA")
     order = np.argsort(w)
     values = w[order].tolist()
     vectors = []
@@ -431,17 +464,18 @@ def get_cisd_states(Hq, mol_name, n_elec, n_qubits, N=1, save=True):
         wfs = np.zeros(2**n_qubits)
         for iidx, iindx in enumerate(indices):
             wfs[iindx] = v[iidx, i]
-        wfs = wfs/np.linalg.norm(wfs)
+        wfs = wfs / np.linalg.norm(wfs)
         vectors.append(wfs)
     if save:
         Path(path).mkdir(exist_ok=True)
-        with open(path + "psi_cisd.pkl",
+        with open(path + "psi_cisd.pkl", "wb") as file:
             pickle.dump([values, vectors], file)
 
     return values, vectors
 
+
 def compute_and_print_ev_var(psi, h_ferm, all_OPS, meas_alloc=None):
-
+    """
     Parameters
     ----------
     psi Wavefunction
@@ -451,12 +485,13 @@ def compute_and_print_ev_var(psi, h_ferm, all_OPS, meas_alloc=None):
     it is computed according to the variances [see Quantum 5, 385 (2021)].
 
     Prints out the variances and expectation of each fragment over psi.
-
+    """
     n_qubit = ferm.qubit_number(h_ferm)
     h_const = h_ferm.constant
     exps, variances = compute_ev_var_all_ops(psi, n_qubit, all_OPS)
-    if meas_alloc is None:
-
+    if meas_alloc is None:
+        meas_alloc = ferm.compute_meas_alloc(variances)
+    scaled_variances = np.divide(variances, meas_alloc)
 
     scaled_variances_sum = np.sum(scaled_variances)
     print("Full variances:")
@@ -466,8 +501,9 @@ def compute_and_print_ev_var(psi, h_ferm, all_OPS, meas_alloc=None):
     print("Variance metric value is {}".format(scaled_variances_sum))
     print("Exp value is {}".format(np.sum(exps) + h_const))
 
+
 def compute_ev_var_all_ops(psi, n_qubit, all_OPS, trunc=False):
-
+    """
     Parameters
     ----------
     psi Wavefunction
@@ -478,7 +514,7 @@ def compute_ev_var_all_ops(psi, n_qubit, all_OPS, trunc=False):
     -------
     exps Expectations of the fragments over psi
     variances Variances of the fragments over psi
-
+    """
     num_frags = len(all_OPS)
     exps = np.zeros(num_frags)
     variances = np.zeros(num_frags)
@@ -487,8 +523,9 @@ def compute_ev_var_all_ops(psi, n_qubit, all_OPS, trunc=False):
         variances[i] = ferm.variance_value(all_OPS[i], psi, n_qubit, trunc=trunc)
     return exps, variances
 
+
 def init_ev_dict(mol_name, psi, n_qubit, trunc=False, spin_orb=True, save=True):
-
+    """
     Parameters
     ----------
     psi Wavefunction
@@ -498,14 +535,14 @@ def init_ev_dict(mol_name, psi, n_qubit, trunc=False, spin_orb=True, save=True):
     Returns
     -------
     ev_dict_all Returns a dictionary of expectaion and variances of fermionic operators over psi
-
+    """
     if spin_orb:
         n = n_qubit
     else:
         n = n_qubit // 2
     path = "SAVE/" + mol_name.lower() + "/"
     if os.path.isfile(path + "ev_dict.pkl"):
-        with open(path + "ev_dict.pkl",
+        with open(path + "ev_dict.pkl", "rb") as file:
             ev_dict_all = pickle.load(file)
     else:
         ev_dict_E = ferm.get_E(psi, n, n_qubit, trunc=trunc)
@@ -513,16 +550,18 @@ def init_ev_dict(mol_name, psi, n_qubit, trunc=False, spin_orb=True, save=True):
         ev_dict_all = ferm.reorganize(n, ev_dict_E, ev_dict_EE)
         if save:
             Path(path).mkdir(exist_ok=True)
-            with open(path + "ev_dict.pkl",
+            with open(path + "ev_dict.pkl", "wb") as file:
                 pickle.dump(ev_dict_all, file)
     return ev_dict_all
 
+
 def check_method(method):
     if method.lower() not in ["full", "r1", "r2"]:
-
+        raise TequilaException("method has to be specified as one from Full, R1 or R2")
+
 
 def compute_O_t(U_OPS, method, tbts, mol_name, save=True):
-
+    """
     Parameters
     ----------
     U_OPS Orbital rotations
@@ -532,56 +571,57 @@ def compute_O_t(U_OPS, method, tbts, mol_name, save=True):
     Returns
     -------
     O_t O_alpha (arXiv:2208.14490v3 - Section 2.3)
-
+    """
     check_method(method)
 
     path = "SAVE/" + mol_name.lower() + "/" + method.lower() + "/"
     if os.path.isfile(path + "O_t.pkl"):
-        with open(path + "O_t.pkl",
+        with open(path + "O_t.pkl", "rb") as file:
            O_t = pickle.load(file)
         return O_t
 
     num_frags = U_OPS.shape[0]
     n = U_OPS.shape[1]
     if method.lower() == "full":
-        n_param = n//2
+        n_param = n // 2
     else:
         n_param = 1
-    ratios = np.zeros([num_frags,n])
+    ratios = np.zeros([num_frags, n])
    for frag1 in range(num_frags):
         for p1 in range(n):
             if method.lower() == "r1":
-                ratios[frag1, p1] = tbts[frag1, p1,p1,p1,p1]
+                ratios[frag1, p1] = tbts[frag1, p1, p1, p1, p1]
             else:
-                ratios[frag1, p1] = np.sum([tbts[frag1,p1,p1,r1,r1] for r1 in range(n)])
+                ratios[frag1, p1] = np.sum([tbts[frag1, p1, p1, r1, r1] for r1 in range(n)])
     O_t_tmp = np.zeros([num_frags, n, n, n])
     O_t = np.zeros([num_frags, n_param, n, n])
-    Otmp = np.zeros([n,n])
+    Otmp = np.zeros([n, n])
     for k in range(num_frags):
         Umat = U_OPS[k, :, :]
         for p in range(n):
             for r, s in product(range(n), repeat=2):
-                Otmp[r,s] = Umat[r,p] * Umat[s,p].conjugate()
-            O_t_tmp[k,p
+                Otmp[r, s] = Umat[r, p] * Umat[s, p].conjugate()
+            O_t_tmp[k, p, :, :] = Otmp
     if method.lower() == "full":
         for k in range(num_frags):
             for p in range(n_param):
                 for alpha in range(2):
-                    O_t[k,p
+                    O_t[k, p, :, :] += O_t_tmp[k, 2 * (p) + alpha, :, :]
     else:
         for k in range(num_frags):
             for p1 in range(n):
-                O_t[k,0
+                O_t[k, 0, :, :] += ratios[k, p1] * O_t_tmp[k, p1, :, :]
 
     if save:
         Path(path).mkdir(exist_ok=True)
-        with open(path + "O_t.pkl",
-
+        with open(path + "O_t.pkl", "wb") as file:
+            pickle.dump(O_t, file)
 
     return O_t
 
+
 def compute_all_covs(all_OPS, O_t, psi, n_qubit, mol_name, method, trunc=False, save=True):
-
+    """
     Parameters
     ----------
     psi Wavefunction
@@ -594,11 +634,11 @@ def compute_all_covs(all_OPS, O_t, psi, n_qubit, mol_name, method, trunc=False, save=True):
     Returns
     -------
     all_covs --------------------
-
+    """
     path = "SAVE/" + mol_name.lower() + "/" + method.lower() + "/"
     check_method(method)
     if os.path.isfile(path + "all_covs.pkl"):
-        with open(path + "all_covs.pkl",
+        with open(path + "all_covs.pkl", "rb") as file:
             all_covs = pickle.load(file)
         return all_covs
 
@@ -607,17 +647,18 @@ def compute_all_covs(all_OPS, O_t, psi, n_qubit, mol_name, method, trunc=False, save=True):
         ops1 = []
         for p in range(O_t.shape[1]):
             ops1.append(ferm.obt_to_ferm(O_t[frag_idx, p, :, :], True))
-        all_covs.append(ferm.compute_covk(ops1, all_OPS[frag_idx+1], psi, n_qubit, trunc=trunc))
+        all_covs.append(ferm.compute_covk(ops1, all_OPS[frag_idx + 1], psi, n_qubit, trunc=trunc))
 
     if save:
         Path(path).mkdir(exist_ok=True)
-        with open(path + "all_covs.pkl",
+        with open(path + "all_covs.pkl", "wb") as file:
             pickle.dump(all_covs, file)
 
     return all_covs
 
+
 def compute_cov_OO(O_t, ev_dict_all, mol_name, method, save=True):
-
+    """
     Parameters
     ----------
     O_t O_alpha (arXiv:2208.14490v3 - Section 2.3)
@@ -628,11 +669,11 @@ def compute_cov_OO(O_t, ev_dict_all, mol_name, method, save=True):
     Returns
     -------
     covmat Dictionary of covariances between O_alpha's
-
+    """
     path = "SAVE/" + mol_name.lower() + "/" + method.lower() + "/"
     check_method(method)
     if os.path.isfile(path + "cov_OO.pkl"):
-        with open(path + "cov_OO.pkl",
+        with open(path + "cov_OO.pkl", "rb") as file:
             covmat = pickle.load(file)
         return covmat
 
@@ -646,17 +687,18 @@ def compute_cov_OO(O_t, ev_dict_all, mol_name, method, save=True):
                 for q in range(n):
                     ind1 = n * (k) + p
                     ind2 = n * (l) + q
-                    covmat[ind1, ind2] = ferm.covariance_ob_ob(O_t[k,p
+                    covmat[ind1, ind2] = ferm.covariance_ob_ob(O_t[k, p, :, :], O_t[l, q, :, :], ev_dict_all[0])
 
     if save:
         Path(path).mkdir(exist_ok=True)
-        with open(path + "cov_OO.pkl",
+        with open(path + "cov_OO.pkl", "wb") as file:
             pickle.dump(covmat, file)
 
     return covmat
 
+
 def compute_cov_O(O_t, H0, ev_dict_all, mol_name, method, save=True):
-
+    """
     Parameters
     ----------
     O_t O_alpha (arXiv:2208.14490v3 - Section 2.3)
@@ -668,11 +710,11 @@ def compute_cov_O(O_t, H0, ev_dict_all, mol_name, method, save=True):
     Returns
     -------
     covvec Covariance of O_alpha's and original fragments
-
+    """
     path = "SAVE/" + mol_name.lower() + "/" + method.lower() + "/"
     check_method(method)
     if os.path.isfile(path + "cov_O.pkl"):
-        with open(path + "cov_O.pkl",
+        with open(path + "cov_O.pkl", "rb") as file:
             covvec = pickle.load(file)
         return covvec
 
@@ -683,21 +725,23 @@ def compute_cov_O(O_t, H0, ev_dict_all, mol_name, method, save=True):
     for k in range(nf):
         for p in range(n):
             ind = n * (k) + p
-            covl = ferm.covariance_ob_ob(O_t[k,p
-            covr = ferm.covariance_ob_ob(H0, O_t[k,p
+            covl = ferm.covariance_ob_ob(O_t[k, p, :, :], H0, ev_dict_all[0])
+            covr = ferm.covariance_ob_ob(H0, O_t[k, p, :, :], ev_dict_all[0])
             covvec[ind] = covl + covr
 
     if save:
         Path(path).mkdir(exist_ok=True)
-        with open(path + "cov_O.pkl",
+        with open(path + "cov_O.pkl", "wb") as file:
             pickle.dump(covvec, file)
 
     return covvec
 
+
 class fff_aux:
-
+    """
     Class containing all variables needed for FFF.
-
+    """
+
     def __init__(self, n_iter, nq, nf, n, o_t, coo, c0, ck, uops, mix):
         self.n_iter = n_iter
         self.nq = nq
@@ -710,8 +754,9 @@ class fff_aux:
         self.uops = uops
         self.mix = mix
 
+
 def fff_multi_iter(obt, tbts, psi, varbs, fff_var, method):
-
+    """
     Parameters
     ----------
     psi Wavefunction
@@ -726,24 +771,26 @@ def fff_multi_iter(obt, tbts, psi, varbs, fff_var, method):
     new_obt Repartitioned one body term
     new_tbts Repartitioned two body terms
     m0 Optimal Measurement Allocation
-
+    """
     check_method(method)
 
     ntmp = obt.shape[0]
-    obt_list = np.zeros([
-    tbts_list = np.zeros([fff_var.n_iter+1, fff_var.nf, ntmp, ntmp, ntmp, ntmp])
-    var_list = np.zeros([fff_var.n_iter+1, fff_var.nf+1])
-    obt_list[0
-    tbts_list[0
-    var_list[0
+    obt_list = np.zeros([fff_var.n_iter + 1, ntmp, ntmp])
+    tbts_list = np.zeros([fff_var.n_iter + 1, fff_var.nf, ntmp, ntmp, ntmp, ntmp])
+    var_list = np.zeros([fff_var.n_iter + 1, fff_var.nf + 1])
+    obt_list[0, :, :] = obt
+    tbts_list[0, :, :, :, :, :] = tbts
+    var_list[0, :] = varbs
     new_c0 = fff_var.c0
     new_ck = fff_var.ck
     for i in range(fff_var.n_iter):
         print("Progress: iteration #{} out of {}".format(i + 1, fff_var.n_iter))
-        new_obt, new_tbts, new_vars, new_c0, new_ck = ferm.fff_1_iter(
-
-
-
-
+        new_obt, new_tbts, new_vars, new_c0, new_ck = ferm.fff_1_iter(
+            obt_list[i, :, :], tbts_list[i, :, :, :, :, :], var_list[i, :], new_c0, new_ck, fff_var
+        )
+        obt_list[i + 1, :, :] = new_obt
+        tbts_list[i + 1, :, :, :, :, :] = new_tbts
+        var_list[i + 1, :] = new_vars
+    var0 = var_list[len(var_list) - 1, :]
     m0 = ferm.compute_meas_alloc(var0, obt, tbts, fff_var.nq, fff_var.mix)
-    return obt_list[len(obt_list)-1
+    return obt_list[len(obt_list) - 1, :, :], tbts_list[len(tbts_list) - 1, :, :, :, :, :], m0, var0