tequila-basic 1.9.8-py3-none-any.whl → 1.9.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. tequila/__init__.py +29 -14
  2. tequila/apps/__init__.py +14 -5
  3. tequila/apps/_unary_state_prep_impl.py +145 -112
  4. tequila/apps/adapt/__init__.py +9 -1
  5. tequila/apps/adapt/adapt.py +154 -113
  6. tequila/apps/krylov/__init__.py +1 -1
  7. tequila/apps/krylov/krylov.py +23 -21
  8. tequila/apps/robustness/helpers.py +10 -6
  9. tequila/apps/robustness/interval.py +238 -156
  10. tequila/apps/unary_state_prep.py +29 -23
  11. tequila/autograd_imports.py +8 -5
  12. tequila/circuit/__init__.py +2 -1
  13. tequila/circuit/_gates_impl.py +135 -67
  14. tequila/circuit/circuit.py +177 -88
  15. tequila/circuit/compiler.py +114 -105
  16. tequila/circuit/gates.py +288 -120
  17. tequila/circuit/gradient.py +35 -23
  18. tequila/circuit/noise.py +83 -74
  19. tequila/circuit/postselection.py +120 -0
  20. tequila/circuit/pyzx.py +10 -6
  21. tequila/circuit/qasm.py +201 -83
  22. tequila/circuit/qpic.py +63 -61
  23. tequila/grouping/binary_rep.py +148 -146
  24. tequila/grouping/binary_utils.py +84 -75
  25. tequila/grouping/compile_groups.py +334 -230
  26. tequila/grouping/ev_utils.py +77 -41
  27. tequila/grouping/fermionic_functions.py +383 -308
  28. tequila/grouping/fermionic_methods.py +170 -123
  29. tequila/grouping/overlapping_methods.py +69 -52
  30. tequila/hamiltonian/paulis.py +12 -13
  31. tequila/hamiltonian/paulistring.py +1 -1
  32. tequila/hamiltonian/qubit_hamiltonian.py +45 -35
  33. tequila/ml/__init__.py +1 -0
  34. tequila/ml/interface_torch.py +19 -16
  35. tequila/ml/ml_api.py +11 -10
  36. tequila/ml/utils_ml.py +12 -11
  37. tequila/objective/__init__.py +8 -3
  38. tequila/objective/braket.py +55 -47
  39. tequila/objective/objective.py +91 -56
  40. tequila/objective/qtensor.py +36 -27
  41. tequila/optimizers/__init__.py +31 -23
  42. tequila/optimizers/_containers.py +11 -7
  43. tequila/optimizers/optimizer_base.py +111 -83
  44. tequila/optimizers/optimizer_gd.py +258 -231
  45. tequila/optimizers/optimizer_gpyopt.py +56 -42
  46. tequila/optimizers/optimizer_scipy.py +157 -112
  47. tequila/quantumchemistry/__init__.py +66 -38
  48. tequila/quantumchemistry/chemistry_tools.py +394 -203
  49. tequila/quantumchemistry/encodings.py +121 -13
  50. tequila/quantumchemistry/madness_interface.py +170 -96
  51. tequila/quantumchemistry/orbital_optimizer.py +86 -40
  52. tequila/quantumchemistry/psi4_interface.py +166 -97
  53. tequila/quantumchemistry/pyscf_interface.py +70 -23
  54. tequila/quantumchemistry/qc_base.py +866 -414
  55. tequila/simulators/__init__.py +0 -3
  56. tequila/simulators/simulator_api.py +258 -106
  57. tequila/simulators/simulator_aqt.py +102 -0
  58. tequila/simulators/simulator_base.py +156 -55
  59. tequila/simulators/simulator_cirq.py +58 -42
  60. tequila/simulators/simulator_cudaq.py +600 -0
  61. tequila/simulators/simulator_ddsim.py +390 -0
  62. tequila/simulators/simulator_mqp.py +30 -0
  63. tequila/simulators/simulator_pyquil.py +190 -171
  64. tequila/simulators/simulator_qibo.py +95 -87
  65. tequila/simulators/simulator_qiskit.py +124 -114
  66. tequila/simulators/simulator_qlm.py +52 -26
  67. tequila/simulators/simulator_qulacs.py +85 -59
  68. tequila/simulators/simulator_spex.py +464 -0
  69. tequila/simulators/simulator_symbolic.py +6 -5
  70. tequila/simulators/test_spex_simulator.py +208 -0
  71. tequila/tools/convenience.py +4 -4
  72. tequila/tools/qng.py +72 -64
  73. tequila/tools/random_generators.py +38 -34
  74. tequila/utils/bitstrings.py +13 -7
  75. tequila/utils/exceptions.py +19 -5
  76. tequila/utils/joined_transformation.py +8 -10
  77. tequila/utils/keymap.py +0 -5
  78. tequila/utils/misc.py +6 -4
  79. tequila/version.py +1 -1
  80. tequila/wavefunction/qubit_wavefunction.py +52 -30
  81. {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info}/METADATA +23 -17
  82. tequila_basic-1.9.10.dist-info/RECORD +93 -0
  83. {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info}/WHEEL +1 -1
  84. tequila_basic-1.9.8.dist-info/RECORD +0 -86
  85. {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info/licenses}/LICENSE +0 -0
  86. {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info}/top_level.txt +0 -0
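Beyond reformatting of existing modules, 1.9.10 adds several entirely new files: a postselection module (tequila/circuit/postselection.py) and new simulator interfaces (simulator_aqt.py, simulator_cudaq.py, simulator_ddsim.py, simulator_mqp.py, simulator_spex.py). As a minimal sketch (not part of the diff, and assuming the standard tequila API), one way to see which of these backends are usable after upgrading:

import tequila as tq

# Prints the simulators/samplers tequila detects in the current environment;
# a newly supported backend only appears here if its Python package is installed.
tq.show_available_simulators()

# A specific backend can then be requested explicitly, for example:
# result = tq.simulate(objective, backend="qulacs")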
@@ -1,7 +1,18 @@
 # Generalized Adaptive Solvers
 # as described in Kottmann, Anand, Aspuru-Guzik: https://doi.org/10.1039/D0SC06627C

-from tequila import QCircuit, QubitHamiltonian, gates, paulis, grad, simulate, TequilaWarning, TequilaException, minimize, ExpectationValue
+from tequila import (
+    QCircuit,
+    QubitHamiltonian,
+    gates,
+    paulis,
+    grad,
+    simulate,
+    TequilaWarning,
+    TequilaException,
+    minimize,
+    ExpectationValue,
+)
 import numpy
 import dataclasses
 import warnings
@@ -10,32 +21,39 @@ from itertools import combinations

 @dataclasses.dataclass
 class AdaptParameters:
-
-    optimizer_args: dict = dataclasses.field(default_factory=lambda : {"method":"bfgs", "silent":True, "method_options":{"gtol":1.e-5}})
-    compile_args: dict = dataclasses.field(default_factory=lambda : {})
-    maxiter:int = 100
+    optimizer_args: dict = dataclasses.field(
+        default_factory=lambda: {"method": "bfgs", "silent": True, "method_options": {"gtol": 1.0e-5}}
+    )
+    compile_args: dict = dataclasses.field(default_factory=lambda: {})
+    maxiter: int = 100
     batch_size = 1
     energy_convergence: float = None
-    gradient_convergence: float = 1.e-3
-    max_gradient_convergence: float = 5.e-4
-    degeneracy_threshold: float = 5.e-4
+    gradient_convergence: float = 1.0e-3
+    max_gradient_convergence: float = 5.0e-4
+    degeneracy_threshold: float = 5.0e-4
     silent: bool = False
-
+
     def __post_init__(self):
         # avoid stacking of same operator-types in a row
         if "method_options" in self.optimizer_args:
             if "gtol" in self.optimizer_args["method_options"]:
-                gtol=self.optimizer_args["method_options"]["gtol"]
+                gtol = self.optimizer_args["method_options"]["gtol"]
                 if gtol > self.max_gradient_convergence:
-                    warnings.warn("you specified screening threshold max_gradient_convergence={} but optimizer theshold gtol={}. This will lead to accumulation of the same operator, will set max_gradient_convergence={}".format(self.max_gradient_convergence, gtol, gtol*2),TequilaWarning)
-                    self.max_gradient_convergence = gtol*2.0
-
+                    warnings.warn(
+                        "you specified screening threshold max_gradient_convergence={} but optimizer theshold gtol={}. This will lead to accumulation of the same operator, will set max_gradient_convergence={}".format(
+                            self.max_gradient_convergence, gtol, gtol * 2
+                        ),
+                        TequilaWarning,
+                    )
+                    self.max_gradient_convergence = gtol * 2.0
+
     def __str__(self):
         info = ""
-        for k,v in self.__dict__.items():
+        for k, v in self.__dict__.items():
             info += "{:30} : {}\n".format(k, v)
         return info

+
 class AdaptPoolBase:
     """
     Standard class for operator pools in Adapt
@@ -44,11 +62,11 @@ class AdaptPoolBase:

     generators: list = None

-    __n: int = 0 # for iterator, don't touch
+    __n: int = 0  # for iterator, don't touch

     def __init__(self, generators, trotter_steps=1):
         self.generators = generators
-        self.trotter_steps=1
+        self.trotter_steps = 1

     def make_unitary(self, k, label) -> QCircuit:
         return gates.Trotterized(generators=[self.generators[k]], angles=[(str(k), label)], steps=self.trotter_steps)
@@ -60,7 +78,7 @@ class AdaptPoolBase:
     def __next__(self):
         if self.__n < len(self.generators):
             result = self.__n
-            self.__n +=1
+            self.__n += 1
             return result
         else:
             raise StopIteration
@@ -68,6 +86,7 @@ class AdaptPoolBase:
     def __str__(self):
         return "{} with {} Generators".format(type(self).__name__, len(self.generators))

+
 class ObjectiveFactoryBase:
     """
     Default class to create the objective in the Adapt solver
@@ -75,9 +94,9 @@ class ObjectiveFactoryBase:
     and U will be the circuit that is adaptively constructed
     """

-    Upre: QCircuit=QCircuit()
-    Upost: QCircuit=QCircuit()
-    H : QubitHamiltonian=None
+    Upre: QCircuit = QCircuit()
+    Upost: QCircuit = QCircuit()
+    H: QubitHamiltonian = None

     def __init__(self, H=None, Upre=None, Upost=None, *args, **kwargs):
         if H is None:
@@ -102,19 +121,18 @@ class ObjectiveFactoryBase:
     def __str__(self):
         return "{}".format(type(self).__name__)

-class Adapt:

+class Adapt:
     operator_pool: AdaptPoolBase = None
     objective_factory = None
     parameters: AdaptParameters = AdaptParameters()

-
     def make_objective(self, U, variables=None, *args, **kwargs):
         return self.objective_factory(U=U, variables=variables, *args, **{**self.parameters.compile_args, **kwargs})

     def __init__(self, operator_pool, H=None, objective_factory=None, *args, **kwargs):
         """
-        For the Default Adaptive Solver kwargs can contain Upre and Upost as described in:
+        For the Default Adaptive Solver kwargs can contain Upre and Upost as described in:
         See out online tutorial for more information: https://github.com/tequilahub/tequila-tutorials
         Better code-documentation will be there at some point ....
         """
@@ -126,11 +144,14 @@ class Adapt:

         filtered = {k: v for k, v in kwargs.items() if k in self.parameters.__dict__}
         self.parameters = AdaptParameters(*args, **filtered)
-        if self.parameters.silent and not self.parameters.optimizer_args is None and "silent" not in self.parameters.optimizer_args:
+        if (
+            self.parameters.silent
+            and self.parameters.optimizer_args is not None
+            and "silent" not in self.parameters.optimizer_args
+        ):
             self.parameters.optimizer_args["silent"] = True

-    def __call__(self, static_variables = None, mp_pool=None, label=None, variables=None, *args, **kwargs):
-
+    def __call__(self, static_variables=None, mp_pool=None, label=None, variables=None, *args, **kwargs):
         if not self.parameters.silent:
             print("Starting Adaptive Solver")
             print(self)
@@ -155,28 +176,29 @@ class Adapt:
         elif hasattr(self.operator_pool, "initialize_circuit"):
             U = self.operator_pool.initialize_circuit()

-        initial_objective = self.make_objective(U, variables = variables)
+        initial_objective = self.make_objective(U, variables=variables)
         for k in initial_objective.extract_variables():
             if k not in variables:
-                warnings.warn("variable {} of initial objective not given, setting to 0.0 and activate optimization".format(k), TequilaWarning)
+                warnings.warn(
+                    "variable {} of initial objective not given, setting to 0.0 and activate optimization".format(k),
+                    TequilaWarning,
+                )
                 variables[k] = 0.0

-        if len(initial_objective.extract_variables())>0:
+        if len(initial_objective.extract_variables()) > 0:
             active_variables = [k for k in variables if k not in static_variables]
-            if len(active_variables)>0:
+            if len(active_variables) > 0:
                 if not self.parameters.silent:
                     print("initial optimization")
-                margs = {"initial_values":variables}
-                margs = {**margs,**self.parameters.compile_args,**self.parameters.optimizer_args}
-                result = minimize(objective=initial_objective,
-                                  variables=active_variables,
-                                  **margs)
+                margs = {"initial_values": variables}
+                margs = {**margs, **self.parameters.compile_args, **self.parameters.optimizer_args}
+                result = minimize(objective=initial_objective, variables=active_variables, **margs)

                 variables = result.variables

         energy = simulate(initial_objective, variables=variables)
         for iter in range(self.parameters.maxiter):
-            current_label = (iter,0)
+            current_label = (iter, 0)
             if label is not None:
                 current_label = (iter, label)

@@ -192,24 +214,35 @@ class Adapt:
                 break
             if numpy.abs(max_grad) < self.parameters.max_gradient_convergence:
                 if not self.parameters.silent:
-                    print("max pool gradient is {:+2.8f}, convergence criterion |max(grad)|<{} met".format(max_grad, self.parameters.max_gradient_convergence))
+                    print(
+                        "max pool gradient is {:+2.8f}, convergence criterion |max(grad)|<{} met".format(
+                            max_grad, self.parameters.max_gradient_convergence
+                        )
+                    )
                 break

             batch_size = self.parameters.batch_size

             # detect degeneracies
-            degeneracies = [k for k in range(batch_size, len(grad_values))
-                            if numpy.isclose(grad_values[batch_size-1],grad_values[k], rtol=self.parameters.degeneracy_threshold) ]
+            degeneracies = [
+                k
+                for k in range(batch_size, len(grad_values))
+                if numpy.isclose(grad_values[batch_size - 1], grad_values[k], rtol=self.parameters.degeneracy_threshold)
+            ]

             if len(degeneracies) > 0:
                 batch_size += len(degeneracies)
                 if not self.parameters.silent:
-                    print("detected degeneracies: increasing batch size temporarily from {} to {}".format(self.parameters.batch_size, batch_size))
+                    print(
+                        "detected degeneracies: increasing batch size temporarily from {} to {}".format(
+                            self.parameters.batch_size, batch_size
+                        )
+                    )

             count = 0
-
-            op_names=[]
-            for k,v in gradients.items():
+
+            op_names = []
+            for k, v in gradients.items():
                 Ux = self.operator_pool.make_unitary(k, label=current_label)
                 U += Ux
                 op_names.append(Ux.extract_variables())
@@ -217,21 +250,19 @@ class Adapt:
                 if count >= batch_size:
                     break

-            variables = {**variables, **{k:0.0 for k in U.extract_variables() if k not in variables}}
+            variables = {**variables, **{k: 0.0 for k in U.extract_variables() if k not in variables}}
             active_variables = [k for k in variables if k not in static_variables]

             objective = self.make_objective(U, variables=variables)
-            margs = {"initial_values":variables}
-            margs = {**margs,**self.parameters.compile_args,**self.parameters.optimizer_args}
-            result = minimize(objective=objective,
-                              variables=active_variables,
-                              **margs)
-
+            margs = {"initial_values": variables}
+            margs = {**margs, **self.parameters.compile_args, **self.parameters.optimizer_args}
+            result = minimize(objective=objective, variables=active_variables, **margs)
+
             niter = len(result.history.energies)
             diff = energy - result.energy
             energy = result.energy
             variables = result.variables
-
+
             if not self.parameters.silent:
                 print("-------------------------------------")
                 print("Finished iteration {}".format(iter))
@@ -245,10 +276,10 @@ class Adapt:
                 print("opt-iterations : {}".format(niter))

             screening_cycles += 1
-            mini_iter=len(result.history.extract_energies())
+            mini_iter = len(result.history.extract_energies())
             gradient_expval = sum([v.count_expectationvalues() for k, v in grad(objective).items()])
-            objective_expval_evaluations += mini_iter*objective.count_expectationvalues()
-            gradient_expval_evaluations += mini_iter*gradient_expval
+            objective_expval_evaluations += mini_iter * objective.count_expectationvalues()
+            gradient_expval_evaluations += mini_iter * gradient_expval
             histories.append(result.history)

             if self.parameters.energy_convergence is not None and numpy.abs(diff) < self.parameters.energy_convergence:
@@ -263,26 +294,27 @@ class Adapt:

         @dataclasses.dataclass
         class AdaptReturn:
-            U:QCircuit=None
-            objective_factory:ObjectiveFactoryBase=None
-            variables:dict=None
+            U: QCircuit = None
+            objective_factory: ObjectiveFactoryBase = None
+            variables: dict = None
             energy: float = None
             histories: list = None
             screening_cycles: int = None
-            objective_expval_evaluations: int =None
-            gradient_expval_evaluations: int =None
-
-        return AdaptReturn(U=U,
-                           variables=variables,
-                           objective_factory=self.objective_factory,
-                           energy=energy,
-                           histories=histories,
-                           screening_cycles = screening_cycles,
-                           objective_expval_evaluations=objective_expval_evaluations,
-                           gradient_expval_evaluations=gradient_expval_evaluations)
+            objective_expval_evaluations: int = None
+            gradient_expval_evaluations: int = None
+
+        return AdaptReturn(
+            U=U,
+            variables=variables,
+            objective_factory=self.objective_factory,
+            energy=energy,
+            histories=histories,
+            screening_cycles=screening_cycles,
+            objective_expval_evaluations=objective_expval_evaluations,
+            gradient_expval_evaluations=gradient_expval_evaluations,
+        )

     def screen_gradients(self, U, variables, mp_pool=None):
-
         args = []
         for k in self.operator_pool:
             arg = {}
@@ -306,13 +338,14 @@ class Adapt:
         variables = {**arg["variables"]}
         objective = self.make_objective(Utmp, screening=True, variables=variables)

-
         dEs = []
         for k in Ux.extract_variables():
             variables[k] = 0.0
             dEs.append(grad(objective, k))

-        gradients=[numpy.abs(simulate(objective=dE, variables=variables, **self.parameters.compile_args)) for dE in dEs]
+        gradients = [
+            numpy.abs(simulate(objective=dE, variables=variables, **self.parameters.compile_args)) for dE in dEs
+        ]

         return arg["k"], sum(gradients)

@@ -322,9 +355,9 @@ class Adapt:
         result += str("{:30} : {}\n".format("objective factory : ", self.objective_factory))
         return result

-class MolecularPool(AdaptPoolBase):

-    def __init__(self, molecule, indices:str):
+class MolecularPool(AdaptPoolBase):
+    def __init__(self, molecule, indices: str):
         """

         Parameters
@@ -340,8 +373,12 @@ class MolecularPool(AdaptPoolBase):
         self.molecule = molecule

         if isinstance(indices, str):
-            if not "CC" in indices.upper():
-                raise TequilaException("Pool of type {} not yet supported.\nCreate your own by passing the initialized indices".format(indices))
+            if "CC" not in indices.upper():
+                raise TequilaException(
+                    "Pool of type {} not yet supported.\nCreate your own by passing the initialized indices".format(
+                        indices
+                    )
+                )

             generalized = True if "G" in indices.upper() else False
             paired = True if "P" in indices.upper() else False
@@ -349,42 +386,43 @@ class MolecularPool(AdaptPoolBase):
             doubles = True if "D" in indices.upper() else False

             indices = []
-            if doubles: indices += self.make_indices_doubles(generalized=generalized, paired=paired)
-            if singles: indices += self.make_indices_singles(generalized=generalized)
+            if doubles:
+                indices += self.make_indices_doubles(generalized=generalized, paired=paired)
+            if singles:
+                indices += self.make_indices_singles(generalized=generalized)

         indices = [tuple(k) for k in indices]
         super().__init__(generators=indices)

-
     def make_indices_singles(self, generalized=False):
         indices = []
-        for p in range(self.molecule.n_electrons//2):
-            for q in range(self.molecule.n_electrons//2, self.molecule.n_orbitals):
-                indices.append([(2*p, 2*q)])
-                indices.append([(2*p+1, 2*q+1)])
+        for p in range(self.molecule.n_electrons // 2):
+            for q in range(self.molecule.n_electrons // 2, self.molecule.n_orbitals):
+                indices.append([(2 * p, 2 * q)])
+                indices.append([(2 * p + 1, 2 * q + 1)])
         if not generalized:
             return indices

         for p in range(self.molecule.n_orbitals):
-            for q in range(p+1, self.molecule.n_orbitals):
-                if [(2*p, 2*q)] in indices:
+            for q in range(p + 1, self.molecule.n_orbitals):
+                if [(2 * p, 2 * q)] in indices:
                     continue
-                indices.append([(2*p, 2*q)])
-                indices.append([(2*p+1, 2*q+1)])
+                indices.append([(2 * p, 2 * q)])
+                indices.append([(2 * p + 1, 2 * q + 1)])
         return self.sort_and_filter_unique_indices(indices)

     def make_indices_doubles(self, generalized=False, paired=True):
         indices = []
-        for p in range(self.molecule.n_electrons//2):
-            for q in range(self.molecule.n_electrons//2, self.molecule.n_orbitals):
-                indices.append([(2*p, 2*q),(2*p+1, 2*q+1)])
+        for p in range(self.molecule.n_electrons // 2):
+            for q in range(self.molecule.n_electrons // 2, self.molecule.n_orbitals):
+                indices.append([(2 * p, 2 * q), (2 * p + 1, 2 * q + 1)])

         if not generalized:
             return indices

         for p in range(self.molecule.n_orbitals):
-            for q in range(p+1, self.molecule.n_orbitals):
-                idx = [(2*p, 2*q),(2*p+1, 2*q+1)]
+            for q in range(p + 1, self.molecule.n_orbitals):
+                idx = [(2 * p, 2 * q), (2 * p + 1, 2 * q + 1)]
                 if idx in indices:
                     continue
                 indices.append(idx)
@@ -405,29 +443,34 @@ class MolecularPool(AdaptPoolBase):
        # sort as: [[(a,b),(c,d),(e,f)...],...]with a<c, a<b, c<d
        sorted_indices = []
        for idx in indices:
-            idx = tuple([tuple(sorted(pair)) for pair in idx]) # sort internal pairs (a<b, c<d, etc)
+            idx = tuple([tuple(sorted(pair)) for pair in idx])  # sort internal pairs (a<b, c<d, etc)
             # avoid having orbitals show up multiple times in excitatin strings
-            idx = tuple([pair for pair in idx if sum([1 for pair2 in idx if pair[0] in pair2 or pair[1] in pair2 ])==1 ])
+            idx = tuple(
+                [pair for pair in idx if sum([1 for pair2 in idx if pair[0] in pair2 or pair[1] in pair2]) == 1]
+            )
             if len(idx) == 0:
                 continue
-            idx = tuple(list(set(idx))) # avoid repetitions (like ((0,2),(0,2)))
-            idx = tuple(sorted(idx, key=lambda x:x[0])) # sort pairs by first entry (a<c)
+            idx = tuple(list(set(idx)))  # avoid repetitions (like ((0,2),(0,2)))
+            idx = tuple(sorted(idx, key=lambda x: x[0]))  # sort pairs by first entry (a<c)
             sorted_indices.append(idx)
         return list(set(sorted_indices))

-
-
     def make_unitary(self, k, label):
-        return self.molecule.make_excitation_gate(indices=self.generators[k], angle=(self.generators[k], label), assume_real=True)
+        return self.molecule.make_excitation_gate(
+            indices=self.generators[k], angle=(self.generators[k], label), assume_real=True
+        )

-class PseudoSingletMolecularPool(MolecularPool):

+class PseudoSingletMolecularPool(MolecularPool):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         indices = []
         for idx in self.generators:
             if len(idx) == 1:
-                combined = ( ((idx[0][0]//2*2, idx[0][1]//2*2)), ((idx[0][0]//2*2+1, idx[0][1]//2*2+1)) )
+                combined = (
+                    ((idx[0][0] // 2 * 2, idx[0][1] // 2 * 2)),
+                    ((idx[0][0] // 2 * 2 + 1, idx[0][1] // 2 * 2 + 1)),
+                )
                 if combined not in indices:
                     indices.append(combined)
             else:
@@ -439,47 +482,45 @@ class PseudoSingletMolecularPool(MolecularPool):
         U = QCircuit()
         for idx in self.generators[k]:
             combined_variable = self.generators[k][0]
-            U += self.molecule.make_excitation_gate(indices=idx, angle=(combined_variable,label))
+            U += self.molecule.make_excitation_gate(indices=idx, angle=(combined_variable, label))
         return U

-class ObjectiveFactorySequentialExcitedState(ObjectiveFactoryBase):

+class ObjectiveFactorySequentialExcitedState(ObjectiveFactoryBase):
     def __init__(self, H, circuits: list, factors: list, *args, **kwargs):
         self.circuits = circuits
         self.factors = factors
         super().__init__(H=H, *args, **kwargs)

-    def __call__(self, U, *args, **kwargs):
+    def __call__(self, U, *args, **kwargs):
         circuit = self.Upre + U + self.Upost
         objective = ExpectationValue(H=self.H, U=circuit)
         Qp = paulis.Qp(U.qubits)
         # get all overlaps
-        for i,Ux in enumerate(self.circuits):
-            S2 = ExpectationValue(H=Qp, U=circuit+Ux.dagger())
-            objective += numpy.abs(self.factors[i])*S2
+        for i, Ux in enumerate(self.circuits):
+            S2 = ExpectationValue(H=Qp, U=circuit + Ux.dagger())
+            objective += numpy.abs(self.factors[i]) * S2
         return objective

-def run_molecular_adapt(molecule, operator_pool: str = None, Upre=None , Upost=None, *args, **kwargs):

+def run_molecular_adapt(molecule, operator_pool: str = None, Upre=None, Upost=None, *args, **kwargs):
     if operator_pool is None:
         operator_pool = "UCCGSD"

     # auto-detect if we have an molecular pool
     # initialized by keyword
     # e.g. U(p)CC(G)(S)(D)
-    ucc_signals=["u", "cc", "s", "d", "g"]
+    ucc_signals = ["u", "cc", "s", "d", "g"]
     if hasattr(operator_pool, "lower"):
         if any([s in operator_pool.lower() for s in ucc_signals]):
-            operator_pool = MolecularPool(molecule=molecule, indices=operator_pool)
-
+            operator_pool = MolecularPool(molecule=molecule, indices=operator_pool)
+
     if Upre is None:
         Upre = molecule.prepare_reference()

     H = molecule.make_hamiltonian()
     solver = Adapt(operator_pool=operator_pool, H=H, Upre=Upre, Upost=Upost, *args, **kwargs)
-
+
     result = solver()

     return result
-
-
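The adapt.py changes above are almost entirely black-style reformatting; the entry points visible in the diff (Adapt, MolecularPool, run_molecular_adapt) keep their signatures. A minimal usage sketch based only on those names (the import path, the Molecule constructor arguments, and the presence of a chemistry backend such as pyscf or psi4 are assumptions):

import tequila as tq
from tequila.apps.adapt.adapt import Adapt, MolecularPool

# hypothetical H2 example; any tq Molecule should work the same way
mol = tq.Molecule(geometry="H 0.0 0.0 0.0\nH 0.0 0.0 0.75", basis_set="sto-3g")

pool = MolecularPool(molecule=mol, indices="UCCSD")  # keyword pool, parsed in MolecularPool.__init__
solver = Adapt(operator_pool=pool, H=mol.make_hamiltonian(), Upre=mol.prepare_reference())
result = solver()  # AdaptReturn with result.U, result.variables, result.energy, result.histories
print(result.energy)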
@@ -1 +1 @@
-from .krylov import krylov_method
+from .krylov import krylov_method
@@ -1,20 +1,22 @@
 import copy
 import scipy
-from tequila import braket, QTensor, simulate
+from tequila import BraKet, QTensor, simulate
 from tequila.hamiltonian.qubit_hamiltonian import QubitHamiltonian


-def krylov_method(krylov_circs:list, H:QubitHamiltonian, variables:dict=None, assume_real:bool=False, *args, **kwargs)->tuple:
+def krylov_method(
+    krylov_circs: list, H: QubitHamiltonian, variables: dict = None, assume_real: bool = False, *args, **kwargs
+) -> tuple:
     """Function that applies Krylov method to an Hamiltonian operator,
-    given the list of Krylov quantum circuits. If the circuits are parametrized
-    also the variables need to be passed. The method returns the ground state energy
+    given the list of Krylov quantum circuits. If the circuits are parametrized
+    also the variables need to be passed. The method returns the ground state energy
     and the array of coefficients allowing to obtain an approximation of the ground state.
     Optional function arguments (*args, **kwargs) allows to change simulation options.

     Args:
         krylov_circs (list): List of Krylov circuits.
         H (QubitHamiltonian): Hamiltonian on which we want to apply Krylov method
-        variables (dict, optional): Dicitionary containing possible variables to be stored in the Krylov circuits.
+        variables (dict, optional): Dicitionary containing possible variables to be stored in the Krylov circuits.
             Defaults to None.
         assume_real (bool): If set to True the function does not compute the imaginary part.
             Default to False.
@@ -22,32 +24,32 @@ def krylov_method(krylov_circs:list, H:QubitHamiltonian, variables:dict=None, as
     Returns:
         tuple(np.ndarray, np.ndarray): array of energies, array of krylov coefficients corresponding to the energies
     """
-
+
     n_krylov_states = len(krylov_circs)
-    HM = QTensor(shape=[n_krylov_states,n_krylov_states])
-    SM = QTensor(shape=[n_krylov_states,n_krylov_states])
-
+    HM = QTensor(shape=[n_krylov_states, n_krylov_states])
+    SM = QTensor(shape=[n_krylov_states, n_krylov_states])
+
     if variables is not None:
-        krylov_circs_x = [U.map_variables(variables) for U in krylov_circs]
+        krylov_circs_x = [U.map_variables(variables) for U in krylov_circs]
     else:
         krylov_circs_x = copy.deepcopy(krylov_circs)

     for i in range(n_krylov_states):
-        for j in range(i,n_krylov_states):
+        for j in range(i, n_krylov_states):
             if assume_real:
-                h_real = braket(bra=krylov_circs_x[i], ket=krylov_circs_x[j], operator=H)[0]
+                h_real = BraKet(bra=krylov_circs_x[i], ket=krylov_circs_x[j], operator=H)[0]
                 h_im = 0
             else:
-                h_real, h_im = braket(bra=krylov_circs_x[i], ket=krylov_circs_x[j], operator=H)
-            HM[i,j] = h_real + 1j*h_im
-            HM[j,i] = h_real - 1j*h_im
-            s_real, s_im = braket(bra=krylov_circs_x[i], ket=krylov_circs_x[j])
-            SM[i,j] = s_real + 1j*s_im
-            SM[j,i] = s_real - 1j*s_im
-
-    h = simulate(HM, *args, **kwargs)
+                h_real, h_im = BraKet(bra=krylov_circs_x[i], ket=krylov_circs_x[j], operator=H)
+            HM[i, j] = h_real + 1j * h_im
+            HM[j, i] = h_real - 1j * h_im
+            s_real, s_im = BraKet(bra=krylov_circs_x[i], ket=krylov_circs_x[j])
+            SM[i, j] = s_real + 1j * s_im
+            SM[j, i] = s_real - 1j * s_im
+
+    h = simulate(HM, *args, **kwargs)
     s = simulate(SM, *args, **kwargs)

-    v,vv = scipy.linalg.eigh(h,s)
+    v, vv = scipy.linalg.eigh(h, s)

     return v, vv
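The only functional change in krylov.py is that the transition-element helper is now imported and called as BraKet instead of braket; the rest is formatting. A hedged sketch of calling krylov_method with two fixed (non-parametrized) Krylov states, using only names the package exports (the toy Hamiltonian and circuits are illustrative assumptions):

import tequila as tq
from tequila.apps.krylov import krylov_method

# toy single-qubit Hamiltonian and two linearly independent Krylov states
H = tq.paulis.Z(0) + 0.5 * tq.paulis.X(0)
krylov_circs = [tq.gates.X(0), tq.gates.H(0)]

# solves the generalized eigenvalue problem built from BraKet matrix elements
energies, coefficients = krylov_method(krylov_circs=krylov_circs, H=H)
print(energies[0])  # lowest eigenvalue within the spanned subspace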
@@ -25,16 +25,16 @@ class PauliClique:
         -------
         """
         n_qubits = self.n_qubits
-        eig = np.asarray([0.0 for n in range(2 ** n_qubits)], dtype=float)
+        eig = np.asarray([0.0 for n in range(2**n_qubits)], dtype=float)
         for ps in self.paulistrings:
-            x = np.asarray([1.0 for n in range(2 ** n_qubits)], dtype=int)
+            x = np.asarray([1.0 for n in range(2**n_qubits)], dtype=int)
             paulis = [[1, 1]] * n_qubits
             for d in ps.keys():
                 try:
                     paulis[d] = [1, -1]
-                except:
+                except Exception:
                     raise Exception("weird {} with len={} with d={}".format(paulis, len(paulis), d))
-            for i in range(2 ** n_qubits):
+            for i in range(2**n_qubits):
                 binary_array = BitString.from_int(integer=i, nbits=n_qubits).array
                 for j, k in enumerate(binary_array):
                     x[i] *= paulis[j][k]
@@ -60,8 +60,12 @@ class PauliClique:
         for ps in self.paulistrings:
             normalized_ps.append(PauliString(coeff=ps.coeff / highest_abs, data=ps._data))

-        return PauliClique(coeff=self.coeff * highest_abs, H=QubitHamiltonian.from_paulistrings(normalized_ps),
-                           U=self.U, n_qubits=self.n_qubits)
+        return PauliClique(
+            coeff=self.coeff * highest_abs,
+            H=QubitHamiltonian.from_paulistrings(normalized_ps),
+            U=self.U,
+            n_qubits=self.n_qubits,
+        )

     def naked(self):
         return PauliClique(coeff=1.0, H=self.H, U=self.U, n_qubits=self.n_qubits)
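The PauliClique changes are likewise formatting plus making a bare except explicit; normalize() still divides every Pauli coefficient by the largest absolute coefficient and moves that factor into the clique prefactor. A hedged sketch of that contract (the module path and any constructor behaviour beyond what the diff shows are assumptions):

import tequila as tq
from tequila.grouping.binary_rep import PauliClique  # assumed module path

# two commuting Pauli strings with coefficients 0.5 and 0.25
H = 0.5 * tq.paulis.Z(0) + 0.25 * tq.paulis.Z(1)
clique = PauliClique(coeff=1.0, H=H, U=tq.QCircuit(), n_qubits=2)

normalized = clique.normalize()
print(normalized.coeff)  # expected 1.0 * 0.5 = 0.5; internal coefficients rescale to 1.0 and 0.5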