tequila_basic-1.9.9-py3-none-any.whl → tequila_basic-1.9.10-py3-none-any.whl

This diff shows the changes between package versions as they were published to their public registry. It is provided for informational purposes only.
Files changed (86)
  1. tequila/__init__.py +29 -14
  2. tequila/apps/__init__.py +14 -5
  3. tequila/apps/_unary_state_prep_impl.py +145 -112
  4. tequila/apps/adapt/__init__.py +9 -1
  5. tequila/apps/adapt/adapt.py +154 -113
  6. tequila/apps/krylov/__init__.py +1 -1
  7. tequila/apps/krylov/krylov.py +23 -21
  8. tequila/apps/robustness/helpers.py +10 -6
  9. tequila/apps/robustness/interval.py +238 -156
  10. tequila/apps/unary_state_prep.py +29 -23
  11. tequila/autograd_imports.py +8 -5
  12. tequila/circuit/__init__.py +2 -1
  13. tequila/circuit/_gates_impl.py +135 -67
  14. tequila/circuit/circuit.py +163 -79
  15. tequila/circuit/compiler.py +114 -105
  16. tequila/circuit/gates.py +288 -120
  17. tequila/circuit/gradient.py +35 -23
  18. tequila/circuit/noise.py +83 -74
  19. tequila/circuit/postselection.py +120 -0
  20. tequila/circuit/pyzx.py +10 -6
  21. tequila/circuit/qasm.py +201 -83
  22. tequila/circuit/qpic.py +63 -61
  23. tequila/grouping/binary_rep.py +148 -146
  24. tequila/grouping/binary_utils.py +84 -75
  25. tequila/grouping/compile_groups.py +334 -230
  26. tequila/grouping/ev_utils.py +77 -41
  27. tequila/grouping/fermionic_functions.py +383 -308
  28. tequila/grouping/fermionic_methods.py +170 -123
  29. tequila/grouping/overlapping_methods.py +69 -52
  30. tequila/hamiltonian/paulis.py +12 -13
  31. tequila/hamiltonian/paulistring.py +1 -1
  32. tequila/hamiltonian/qubit_hamiltonian.py +45 -35
  33. tequila/ml/__init__.py +1 -0
  34. tequila/ml/interface_torch.py +19 -16
  35. tequila/ml/ml_api.py +11 -10
  36. tequila/ml/utils_ml.py +12 -11
  37. tequila/objective/__init__.py +8 -3
  38. tequila/objective/braket.py +55 -47
  39. tequila/objective/objective.py +87 -55
  40. tequila/objective/qtensor.py +36 -27
  41. tequila/optimizers/__init__.py +31 -23
  42. tequila/optimizers/_containers.py +11 -7
  43. tequila/optimizers/optimizer_base.py +111 -83
  44. tequila/optimizers/optimizer_gd.py +258 -231
  45. tequila/optimizers/optimizer_gpyopt.py +56 -42
  46. tequila/optimizers/optimizer_scipy.py +157 -112
  47. tequila/quantumchemistry/__init__.py +66 -38
  48. tequila/quantumchemistry/chemistry_tools.py +393 -209
  49. tequila/quantumchemistry/encodings.py +121 -13
  50. tequila/quantumchemistry/madness_interface.py +170 -96
  51. tequila/quantumchemistry/orbital_optimizer.py +86 -41
  52. tequila/quantumchemistry/psi4_interface.py +166 -97
  53. tequila/quantumchemistry/pyscf_interface.py +70 -23
  54. tequila/quantumchemistry/qc_base.py +866 -414
  55. tequila/simulators/__init__.py +0 -3
  56. tequila/simulators/simulator_api.py +247 -105
  57. tequila/simulators/simulator_aqt.py +102 -0
  58. tequila/simulators/simulator_base.py +147 -53
  59. tequila/simulators/simulator_cirq.py +58 -42
  60. tequila/simulators/simulator_cudaq.py +600 -0
  61. tequila/simulators/simulator_ddsim.py +390 -0
  62. tequila/simulators/simulator_mqp.py +30 -0
  63. tequila/simulators/simulator_pyquil.py +190 -171
  64. tequila/simulators/simulator_qibo.py +95 -87
  65. tequila/simulators/simulator_qiskit.py +119 -107
  66. tequila/simulators/simulator_qlm.py +52 -26
  67. tequila/simulators/simulator_qulacs.py +74 -52
  68. tequila/simulators/simulator_spex.py +95 -60
  69. tequila/simulators/simulator_symbolic.py +6 -5
  70. tequila/simulators/test_spex_simulator.py +8 -11
  71. tequila/tools/convenience.py +4 -4
  72. tequila/tools/qng.py +72 -64
  73. tequila/tools/random_generators.py +38 -34
  74. tequila/utils/bitstrings.py +7 -7
  75. tequila/utils/exceptions.py +19 -5
  76. tequila/utils/joined_transformation.py +8 -10
  77. tequila/utils/keymap.py +0 -5
  78. tequila/utils/misc.py +6 -4
  79. tequila/version.py +1 -1
  80. tequila/wavefunction/qubit_wavefunction.py +47 -28
  81. {tequila_basic-1.9.9.dist-info → tequila_basic-1.9.10.dist-info}/METADATA +13 -16
  82. tequila_basic-1.9.10.dist-info/RECORD +93 -0
  83. {tequila_basic-1.9.9.dist-info → tequila_basic-1.9.10.dist-info}/WHEEL +1 -1
  84. tequila_basic-1.9.9.dist-info/RECORD +0 -88
  85. {tequila_basic-1.9.9.dist-info → tequila_basic-1.9.10.dist-info}/licenses/LICENSE +0 -0
  86. {tequila_basic-1.9.9.dist-info → tequila_basic-1.9.10.dist-info}/top_level.txt +0 -0
tequila/optimizers/optimizer_base.py
@@ -1,7 +1,11 @@
 """
 Base class for Optimizers.
 """
-import typing, numbers, copy, warnings
+
+import typing
+import numbers
+import copy
+import warnings
 
 from tequila.utils.exceptions import TequilaException, TequilaWarning
 from tequila.simulators.simulator_api import compile, pick_backend
@@ -39,15 +43,16 @@ class OptimizerHistory:
     energy_calls: typing.List[numbers.Real] = field(default_factory=list)
     gradient_calls: typing.List[typing.Dict[str, numbers.Real]] = field(default_factory=list)
     angles_calls: typing.List[typing.Dict[str, numbers.Number]] = field(default_factory=list)
-
+
     # backward comp.
     @property
     def energies_calls(self):
         return self.energy_calls
+
     @property
     def energies_evaluations(self):
         return self.energy_calls
-
+
     def __add__(self, other):
         """
         magic method for convenient combination of history objects.
@@ -112,13 +117,15 @@ class OptimizerHistory:
             angles[i] = d[assign_variable(key)]
         return angles
 
-    def plot(self,
-             property: typing.Union[str, typing.List[str]] = 'energies',
-             key: str = None,
-             filename=None,
-             baselines: typing.Dict[str, float] = None,
-             *args, **kwargs):
-
+    def plot(
+        self,
+        property: typing.Union[str, typing.List[str]] = "energies",
+        key: str = None,
+        filename=None,
+        baselines: typing.Dict[str, float] = None,
+        *args,
+        **kwargs,
+    ):
         """
         Convenience function to plot the progress of the optimizer over time.
         Parameters
@@ -148,6 +155,7 @@ class OptimizerHistory:
 
         from matplotlib import pyplot as plt
         from matplotlib.ticker import MaxNLocator
+
         fig = plt.figure()
         fig.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
         import pickle
@@ -162,10 +170,10 @@ class OptimizerHistory:
         properties = property
 
         labels = None
-        if 'labels' in kwargs:
-            labels = kwargs['labels']
-        elif 'label' in kwargs:
-            labels = kwargs['label']
+        if "labels" in kwargs:
+            labels = kwargs["labels"]
+        elif "label" in kwargs:
+            labels = kwargs["label"]
 
         if hasattr(labels, "lower"):
             labels = [labels] * len(properties)
@@ -189,21 +197,26 @@ class OptimizerHistory:
         for i, p in enumerate(properties):
             try:
                 label = labels[i]
-            except:
+            except Exception:
                 label = p
 
             if p == "energies":
                 data = getattr(self, "extract_" + p)()
-                plt.plot(list(data.keys()), list(data.values()), label=str(label), marker='o', linestyle='--')
+                plt.plot(list(data.keys()), list(data.values()), label=str(label), marker="o", linestyle="--")
             else:
                 for k in keys[i]:
                     data = getattr(self, "extract_" + p)(key=k)
-                    plt.plot(list(data.keys()), list(data.values()), label=str(label) + " " + str(k), marker='o',
-                             linestyle='--')
-
-        loc = 'best'
-        if 'loc' in kwargs:
-            loc = kwargs['loc']
+                    plt.plot(
+                        list(data.keys()),
+                        list(data.values()),
+                        label=str(label) + " " + str(k),
+                        marker="o",
+                        linestyle="--",
+                    )
+
+        loc = "best"
+        if "loc" in kwargs:
+            loc = kwargs["loc"]
         plt.legend(loc=loc)
         if filename is None:
             plt.show()
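The reshaped `plot` call above is purely cosmetic (black-style line breaks, double-quoted strings, a bare `except` narrowed to `except Exception:`); the public interface is unchanged. For orientation, a minimal usage sketch of that interface — the circuit, Hamiltonian, and variable names are illustrative and not part of this diff:

```python
import tequila as tq

# hypothetical two-parameter circuit and Hamiltonian
U = tq.gates.Ry(angle="a", target=0) + tq.gates.Ry(angle="b", target=1)
E = tq.ExpectationValue(H=tq.paulis.Z(0) + tq.paulis.Z(1), U=U)

result = tq.minimize(E, method="bfgs", initial_values={"a": 0.1, "b": 0.1})

# 'label' and 'loc' travel through **kwargs, as handled in the hunks above
result.history.plot(property="energies", label="VQE run", loc="upper right")
# with filename set, the figure is written to angles_a.pdf and angles_a.pickle
result.history.plot(property="angles", key="a", filename="angles_a")
```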
@@ -211,14 +224,13 @@ class OptimizerHistory:
             pickle.dump(fig, open(filename + ".pickle", "wb"))
             plt.savefig(fname=filename + ".pdf", **kwargs)
 
+
 @dataclass
 class OptimizerResults:
-
     energy: float = None
     history: OptimizerHistory = None
     variables: dict = None
 
-
     @property
     def angles(self):
         # allow backwards compatibility
@@ -226,7 +238,6 @@ class OptimizerResults:
 
 
 class Optimizer:
-
     """
     The base optimizer class, from which other optimizers inherit.
 
@@ -268,15 +279,20 @@ class Optimizer:
     convenience: build and compile (i.e render callable) the hessian of an objective.
 
     """
-    def __init__(self, backend: str = None,
-                 maxiter: int = None,
-                 samples: int = None,
-                 device: str= None,
-                 noise=None,
-                 save_history: bool = True,
-                 silent: typing.Union[bool, int] = False,
-                 print_level: int = 99, *args, **kwargs):
 
+    def __init__(
+        self,
+        backend: str = None,
+        maxiter: int = None,
+        samples: int = None,
+        device: str = None,
+        noise=None,
+        save_history: bool = True,
+        silent: typing.Union[bool, int] = False,
+        print_level: int = 99,
+        *args,
+        **kwargs,
+    ):
         """
         initialize an optimizer.
 
@@ -308,7 +324,7 @@ class Optimizer:
         kwargs
         """
         if backend is None:
-            self.backend = pick_backend(backend, samples=samples, noise=noise,device=device)
+            self.backend = pick_backend(backend, samples=samples, noise=noise, device=device)
         else:
             self.backend = backend
 
@@ -352,11 +368,14 @@ class Optimizer:
         """
         self.history = OptimizerHistory()
 
-    def __call__(self, objective: Objective,
-                 variables: typing.List[Variable],
-                 initial_values: typing.Dict[Variable, numbers.Real] = None,
-                 *args,
-                 **kwargs) -> OptimizerResults:
+    def __call__(
+        self,
+        objective: Objective,
+        variables: typing.List[Variable],
+        initial_values: typing.Dict[Variable, numbers.Real] = None,
+        *args,
+        **kwargs,
+    ) -> OptimizerResults:
         """
         Optimize some objective with the optimizer.
 
@@ -410,18 +429,18 @@ class Optimizer:
             initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
         elif hasattr(initial_values, "lower"):
             if initial_values.lower() == "zero":
-                initial_values = {k:0.0 for k in all_variables}
+                initial_values = {k: 0.0 for k in all_variables}
             elif "zero" in initial_values.lower():
-                scale=0.1
+                scale = 0.1
                 if "scale" in initial_values.lower():
                     # pass as: near_zero_scale=0.1_...
                     scale = float(initial_values.split("scale")[1].split("_")[0].split("=")[1])
                 initial_values = {k: numpy.random.normal(loc=0.0, scale=scale) for k in all_variables}
             elif initial_values.lower() == "random":
-                initial_values = {k: numpy.random.uniform(0.0, 4*numpy.pi) for k in all_variables}
+                initial_values = {k: numpy.random.uniform(0.0, 4 * numpy.pi) for k in all_variables}
             elif "random" in initial_values.lower():
-                scale=2*numpy.pi
-                loc=0.0
+                scale = 2 * numpy.pi
+                loc = 0.0
                 if "scale" in initial_values.lower():
                     scale = float(initial_values.split("scale")[1].split("_")[0].split("=")[1])
                 if "loc" in initial_values.lower():
@@ -469,18 +488,19 @@ class Optimizer:
         Objective:
             a compiled Objective. Types vary.
         """
-        return compile(objective=objective,
-                       samples=self.samples,
-                       backend=self.backend,
-                       device=self.device,
-                       noise=self.noise,
-                       *args, **kwargs)
+        return compile(
+            objective=objective,
+            samples=self.samples,
+            backend=self.backend,
+            device=self.device,
+            noise=self.noise,
+            *args,
+            **kwargs,
+        )
 
-    def compile_gradient(self, objective: Objective,
-                         variables: typing.List[Variable],
-                         gradient=None,
-                         *args, **kwargs) -> typing.Tuple[
-        typing.Dict, typing.Dict]:
+    def compile_gradient(
+        self, objective: Objective, variables: typing.List[Variable], gradient=None, *args, **kwargs
+    ) -> typing.Tuple[typing.Dict, typing.Dict]:
         """
         convenience function to compile gradient objects and relavant types. For use by inheritors.
 
505
  convenience function to compile gradient objects and relavant types. For use by inheritors.
486
506
 
@@ -509,7 +529,7 @@ class Optimizer:
509
529
  if all([isinstance(x, Objective) for x in gradient.values()]):
510
530
  dO = gradient
511
531
  compiled_grad = {k: self.compile_objective(objective=dO[k], *args, **kwargs) for k in variables}
512
- elif 'method' in gradient and gradient['method'] == 'standard_spsa':
532
+ elif "method" in gradient and gradient["method"] == "standard_spsa":
513
533
  dO = None
514
534
  compiled = self.compile_objective(objective=objective)
515
535
  compiled_grad = _SPSAGrad(objective=compiled, variables=variables, **gradient)
@@ -519,17 +539,20 @@ class Optimizer:
                 compiled_grad = {k: _NumGrad(objective=compiled, variable=k, **gradient) for k in variables}
         else:
             raise TequilaOptimizerException(
-                "unknown gradient instruction of type {} : {}".format(type(gradient), gradient))
+                "unknown gradient instruction of type {} : {}".format(type(gradient), gradient)
+            )
 
         return dO, compiled_grad
 
-    def compile_hessian(self,
-                        variables: typing.List[Variable],
-                        grad_obj: typing.Dict[Variable, Objective],
-                        comp_grad_obj: typing.Dict[Variable, Objective],
-                        hessian: dict = None,
-                        *args,
-                        **kwargs) -> tuple:
+    def compile_hessian(
+        self,
+        variables: typing.List[Variable],
+        grad_obj: typing.Dict[Variable, Objective],
+        comp_grad_obj: typing.Dict[Variable, Objective],
+        hessian: dict = None,
+        *args,
+        **kwargs,
+    ) -> tuple:
         """
         convenience function to compile hessians for optimizers which require it.
         Parameters
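`compile_gradient` dispatches on the type of `gradient`: `None` compiles analytical derivative objectives, a dict of `Objective`s is used as-is, `{"method": "standard_spsa", ...}` builds the `_SPSAGrad` wrapper, and other dicts fall through to `_NumGrad`. A sketch of the corresponding user-facing options; the 2-point form follows tequila's documented numerical-gradient convention, while routing `"standard_spsa"` through an `spsa` optimizer method is an assumption here:

```python
import tequila as tq

U = tq.gates.Ry(angle="a", target=0)  # illustrative circuit
O = tq.ExpectationValue(H=tq.paulis.X(0), U=U)

# gradient=None: analytical gradient objectives are compiled per variable
r1 = tq.minimize(O, method="bfgs")

# numerical gradients, consumed by _NumGrad
r2 = tq.minimize(O, method="bfgs", gradient={"method": "2-point", "stepsize": 1.0e-4})

# SPSA instruction from this hunk, consumed by _SPSAGrad (assumed method name)
r3 = tq.minimize(O, method="spsa", gradient={"method": "standard_spsa", "stepsize": 0.1})
```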
@@ -555,8 +578,11 @@ class Optimizer:
 
         if hessian is None:
             if dO is None:
-                raise TequilaOptimizerException("Can not combine analytical Hessian with numerical Gradient\n"
-                                                "hessian instruction was: {}".format(hessian))
+                raise TequilaOptimizerException(
+                    "Can not combine analytical Hessian with numerical Gradient\nhessian instruction was: {}".format(
+                        hessian
+                    )
+                )
 
             compiled_hessian = {}
             ddO = {}
@@ -571,8 +597,9 @@ class Optimizer:
         elif isinstance(hessian, dict):
             if all([isinstance(x, Objective) for x in hessian.values()]):
                 ddO = hessian
-                compiled_hessian = {k: self.compile_objective(objective=ddO[k], *args, **kwargs) for k in
-                                    hessian.keys()}
+                compiled_hessian = {
+                    k: self.compile_objective(objective=ddO[k], *args, **kwargs) for k in hessian.keys()
+                }
             else:
                 ddO = None
                 compiled_hessian = {}
@@ -596,7 +623,7 @@
 
 
 class _NumGrad:
-    """ Numerical Gradient object.
+    """Numerical Gradient object.
     Should not be used outside of optimizers.
     Can't interact with other tequila structures.
@@ -771,8 +798,9 @@ class _NumGrad:
         """
         return self.objective.count_expectationvalues(*args, **kwargs)
 
+
 class _SPSAGrad(_NumGrad):
-    """ Simultaneous Perturbation Stochastic Approximation Gradient object.
+    """Simultaneous Perturbation Stochastic Approximation Gradient object.
     Should not be used outside of optimizers.
     Can't interact with other tequila structures.
@@ -789,7 +817,7 @@ class _SPSAGrad(_NumGrad):
 
     """
 
-    def __init__(self, objective, variables, stepsize, gamma=None,method=None):
+    def __init__(self, objective, variables, stepsize, gamma=None, method=None):
        """
 
        Parameters
@@ -810,7 +838,7 @@
 
         if isinstance(stepsize, list):
             self.nextIndex = 0
-        elif gamma != None:
+        elif gamma is not None:
             self.nextIndex = "adjust"
         else:
             self.nextIndex = -1
@@ -843,7 +871,7 @@
 
         """
         dim = len(keys)
-        perturbation_vector = choices([-1,1],k = dim)
+        perturbation_vector = choices([-1, 1], k=dim)
         left = copy.deepcopy(vars)
         right = copy.deepcopy(vars)
         for i, key in enumerate(keys):
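`choices([-1, 1], k=dim)` draws a Rademacher direction: all parameters are shifted by ±stepsize at once, and a single symmetric difference along that direction estimates every gradient component from just two objective evaluations, regardless of dimension. A self-contained sketch of the estimator on a toy quadratic (names are illustrative, not the package's API):

```python
import copy
from random import choices

def spsa_gradient(objective, values: dict, stepsize: float) -> dict:
    keys = list(values.keys())
    delta = choices([-1, 1], k=len(keys))  # Rademacher perturbation
    left, right = copy.deepcopy(values), copy.deepcopy(values)
    for i, k in enumerate(keys):
        left[k] += delta[i] * stepsize
        right[k] -= delta[i] * stepsize
    diff = objective(left) - objective(right)  # two evaluations total
    return {k: diff / (2 * stepsize * delta[i]) for i, k in enumerate(keys)}

f = lambda v: (v["a"] - 1) ** 2 + (v["b"] + 2) ** 2  # toy objective
print(spsa_gradient(f, {"a": 0.0, "b": 0.0}, stepsize=0.01))
```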
@@ -872,15 +900,15 @@
         type:
             generally, float, the result of the numerical gradient.
         """
-        if(self.nextIndex != -1 and self.nextIndex != "adjust"):
+        if self.nextIndex != -1 and self.nextIndex != "adjust":
             stepsize = self.stepsize[self.nextIndex]
-            if(self.nextIndex != len(self.stepsize) - 1):
+            if self.nextIndex != len(self.stepsize) - 1:
                 self.nextIndex += 1
-        elif(self.nextIndex == -1):
+        elif self.nextIndex == -1:
             stepsize = self.stepsize
         else:
-            stepsize = self.stepsize / (iteration ** self.gamma)
-
+            stepsize = self.stepsize / (iteration**self.gamma)
+
         return self.method(self.objective, variables, self.variables, stepsize, *args, **kwargs)
 
     def calibrated_lr(self, lr, initial_value, max_iter, *args, **kwargs):
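The `nextIndex` sentinel cleaned up above selects one of three stepsize schedules: a list consumed entry by entry (sticking at the last one), a constant scalar (`nextIndex == -1`), or power-law decay `stepsize / iteration**gamma` (`nextIndex == "adjust"`). A standalone sketch of that dispatch, with the mutable index modeled explicitly (helper name is illustrative):

```python
def spsa_stepsize(stepsize, gamma, iteration, state):
    """Replicates the schedule selection in _SPSAGrad; state holds the list index."""
    if isinstance(stepsize, list):
        c = stepsize[state[0]]
        if state[0] != len(stepsize) - 1:
            state[0] += 1              # advance, then stay on the final entry
        return c
    if gamma is None:
        return stepsize                # constant schedule
    return stepsize / iteration**gamma # power-law decay

state = [0]
print([spsa_stepsize([0.5, 0.2, 0.1], None, k, state) for k in range(1, 5)])
# -> [0.5, 0.2, 0.1, 0.1]
print([round(spsa_stepsize(0.5, 0.101, k, [0]), 3) for k in range(1, 4)])
# -> [0.5, 0.466, 0.447]
```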
@@ -904,19 +932,19 @@
         """
         dim = len(initial_value)
         delta = 0
-        if(self.nextIndex != -1 and self.nextIndex != "adjust"):
+        if self.nextIndex != -1 and self.nextIndex != "adjust":
             stepsize = self.stepsize[0]
         else:
             stepsize = self.stepsize
-
+
         for i in range(max_iter):
-            perturbation_vector = choices([-1,1],k = dim)
+            perturbation_vector = choices([-1, 1], k=dim)
             left = copy.deepcopy(initial_value)
             right = copy.deepcopy(initial_value)
             for j, v in enumerate(initial_value):
                 left[v] += perturbation_vector[j] * stepsize
                 right[v] -= perturbation_vector[j] * stepsize
-            numeratorLeft = self.objective(left, *args, **kwargs)
+            numeratorLeft = self.objective(left, *args, **kwargs)
             numeratorRight = self.objective(right, *args, **kwargs)
             delta += numpy.absolute(numeratorRight - numeratorLeft) / max_iter
-        return lr * 2 * stepsize / delta
+        return lr * 2 * stepsize / delta
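`calibrated_lr` averages the magnitude of the symmetric difference `|f(θ−cΔ) − f(θ+cΔ)|` over `max_iter` random directions; since the SPSA gradient estimate has magnitude ≈ delta / (2c), returning `lr * 2 * stepsize / delta` rescales the learning rate so a first update moves each angle by roughly `lr` — a calibration in the spirit of Spall's practical rule for choosing the SPSA gain. A standalone sketch (toy objective, illustrative names):

```python
import copy
import numpy
from random import choices

def calibrated_lr(objective, lr, initial_value: dict, stepsize: float, max_iter: int) -> float:
    dim = len(initial_value)
    delta = 0.0
    for _ in range(max_iter):
        perturbation = choices([-1, 1], k=dim)
        left, right = copy.deepcopy(initial_value), copy.deepcopy(initial_value)
        for j, v in enumerate(initial_value):
            left[v] += perturbation[j] * stepsize
            right[v] -= perturbation[j] * stepsize
        delta += numpy.absolute(objective(right) - objective(left)) / max_iter
    # |gradient estimate| ~= delta / (2 * stepsize), so the first step has size ~lr
    return lr * 2 * stepsize / delta

f = lambda v: (v["a"] - 1) ** 2 + (v["b"] + 2) ** 2
print(calibrated_lr(f, lr=0.1, initial_value={"a": 0.0, "b": 0.0}, stepsize=0.01, max_iter=10))
```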