tequila-basic 1.9.8__py3-none-any.whl → 1.9.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tequila/__init__.py +29 -14
- tequila/apps/__init__.py +14 -5
- tequila/apps/_unary_state_prep_impl.py +145 -112
- tequila/apps/adapt/__init__.py +9 -1
- tequila/apps/adapt/adapt.py +154 -113
- tequila/apps/krylov/__init__.py +1 -1
- tequila/apps/krylov/krylov.py +23 -21
- tequila/apps/robustness/helpers.py +10 -6
- tequila/apps/robustness/interval.py +238 -156
- tequila/apps/unary_state_prep.py +29 -23
- tequila/autograd_imports.py +8 -5
- tequila/circuit/__init__.py +2 -1
- tequila/circuit/_gates_impl.py +135 -67
- tequila/circuit/circuit.py +177 -88
- tequila/circuit/compiler.py +114 -105
- tequila/circuit/gates.py +288 -120
- tequila/circuit/gradient.py +35 -23
- tequila/circuit/noise.py +83 -74
- tequila/circuit/postselection.py +120 -0
- tequila/circuit/pyzx.py +10 -6
- tequila/circuit/qasm.py +201 -83
- tequila/circuit/qpic.py +63 -61
- tequila/grouping/binary_rep.py +148 -146
- tequila/grouping/binary_utils.py +84 -75
- tequila/grouping/compile_groups.py +334 -230
- tequila/grouping/ev_utils.py +77 -41
- tequila/grouping/fermionic_functions.py +383 -308
- tequila/grouping/fermionic_methods.py +170 -123
- tequila/grouping/overlapping_methods.py +69 -52
- tequila/hamiltonian/paulis.py +12 -13
- tequila/hamiltonian/paulistring.py +1 -1
- tequila/hamiltonian/qubit_hamiltonian.py +45 -35
- tequila/ml/__init__.py +1 -0
- tequila/ml/interface_torch.py +19 -16
- tequila/ml/ml_api.py +11 -10
- tequila/ml/utils_ml.py +12 -11
- tequila/objective/__init__.py +8 -3
- tequila/objective/braket.py +55 -47
- tequila/objective/objective.py +91 -56
- tequila/objective/qtensor.py +36 -27
- tequila/optimizers/__init__.py +31 -23
- tequila/optimizers/_containers.py +11 -7
- tequila/optimizers/optimizer_base.py +111 -83
- tequila/optimizers/optimizer_gd.py +258 -231
- tequila/optimizers/optimizer_gpyopt.py +56 -42
- tequila/optimizers/optimizer_scipy.py +157 -112
- tequila/quantumchemistry/__init__.py +66 -38
- tequila/quantumchemistry/chemistry_tools.py +394 -203
- tequila/quantumchemistry/encodings.py +121 -13
- tequila/quantumchemistry/madness_interface.py +170 -96
- tequila/quantumchemistry/orbital_optimizer.py +86 -40
- tequila/quantumchemistry/psi4_interface.py +166 -97
- tequila/quantumchemistry/pyscf_interface.py +70 -23
- tequila/quantumchemistry/qc_base.py +866 -414
- tequila/simulators/__init__.py +0 -3
- tequila/simulators/simulator_api.py +258 -106
- tequila/simulators/simulator_aqt.py +102 -0
- tequila/simulators/simulator_base.py +156 -55
- tequila/simulators/simulator_cirq.py +58 -42
- tequila/simulators/simulator_cudaq.py +600 -0
- tequila/simulators/simulator_ddsim.py +390 -0
- tequila/simulators/simulator_mqp.py +30 -0
- tequila/simulators/simulator_pyquil.py +190 -171
- tequila/simulators/simulator_qibo.py +95 -87
- tequila/simulators/simulator_qiskit.py +124 -114
- tequila/simulators/simulator_qlm.py +52 -26
- tequila/simulators/simulator_qulacs.py +85 -59
- tequila/simulators/simulator_spex.py +464 -0
- tequila/simulators/simulator_symbolic.py +6 -5
- tequila/simulators/test_spex_simulator.py +208 -0
- tequila/tools/convenience.py +4 -4
- tequila/tools/qng.py +72 -64
- tequila/tools/random_generators.py +38 -34
- tequila/utils/bitstrings.py +13 -7
- tequila/utils/exceptions.py +19 -5
- tequila/utils/joined_transformation.py +8 -10
- tequila/utils/keymap.py +0 -5
- tequila/utils/misc.py +6 -4
- tequila/version.py +1 -1
- tequila/wavefunction/qubit_wavefunction.py +52 -30
- {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info}/METADATA +23 -17
- tequila_basic-1.9.10.dist-info/RECORD +93 -0
- {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info}/WHEEL +1 -1
- tequila_basic-1.9.8.dist-info/RECORD +0 -86
- {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info/licenses}/LICENSE +0 -0
- {tequila_basic-1.9.8.dist-info → tequila_basic-1.9.10.dist-info}/top_level.txt +0 -0
tequila/objective/objective.py
CHANGED
@@ -1,4 +1,7 @@
-import typing
+import typing
+import copy
+import numbers
+from typing import Union
 from tequila.grouping.compile_groups import compile_commuting_parts
 from tequila import TequilaException
 from tequila.utils import JoinedTransformation
@@ -10,6 +13,7 @@ import collections

 # convenience

+
 class ExpectationValueImpl:
     """
     Implements the (uncompiled) Expectation Value as a class. Should not be called directly.
@@ -113,9 +117,12 @@ class ExpectationValueImpl:
         the ExpectationValueImpl structure with mapped qubits

         """
-        return ExpectationValueImpl(
-
-
+        return ExpectationValueImpl(
+            H=tuple([H.map_qubits(qubit_map=qubit_map) for H in self.H]),
+            U=self.U.map_qubits(qubit_map=qubit_map),
+            contraction=self._contraction,
+            shape=self._shape,
+        )

     def map_variables(self, variables: dict, *args, **kwargs):
         """
@@ -129,17 +136,25 @@ class ExpectationValueImpl:
         Circuit with changed variables

         """
-        return ExpectationValueImpl(
-
+        return ExpectationValueImpl(
+            H=self.H,
+            U=self.U.map_variables(variables=variables, *args, **kwargs),
+            contraction=self._contraction,
+            shape=self._shape,
+        )

     def __call__(self, *args, **kwargs):
         raise TequilaException(
-            "Tried to call uncompiled ExpectationValueImpl, compile your objective before calling with tq.compile(objective) or evaluate with tq.simulate(objective)"
+            "Tried to call uncompiled ExpectationValueImpl, compile your objective before calling with tq.compile(objective) or evaluate with tq.simulate(objective)"
+        )

     def info(self, short=True, *args, **kwargs):
         if short:
-            print(
-                qubits
+            print(
+                "Expectation Value with {qubits} active qubits and {paulis} paulistrings".format(
+                    qubits=len(self.U.qubits), paulis=len(self.H)
+                )
+            )
         else:
             print("Hamiltonian:\n", str(self.H))
             print("\n", str(self.U))
@@ -256,7 +271,7 @@ class Objective:
         """
         variables = []
         for arg in self.args:
-            if hasattr(arg,
+            if hasattr(arg, "extract_variables"):
                 variables += arg.extract_variables()
             else:
                 variables += []
@@ -303,7 +318,6 @@ class Objective:

     @property
     def args(self) -> typing.Tuple:
-
         if self._args is None:
             return tuple()
         else:
@@ -396,7 +410,7 @@ class Objective:

     def __invert__(self):
         new = Objective(args=[self])
-        return new
+        return new**-1

     @classmethod
     def unary_operator(cls, left, op):
@@ -417,8 +431,7 @@ class Objective:
         Objective representing op applied to objective left.

         """
-        return Objective(args=left.args,
-                         transformation=lambda *args: op(left.transformation(*args)))
+        return Objective(args=left.args, transformation=lambda *args: op(left.transformation(*args)))

     @classmethod
     def binary_operator(cls, left, right, op):
@@ -450,18 +463,23 @@ class Objective:
                 return cls.unary_operator(left=left, op=lambda E: op(E, right))
             else:
                 raise TequilaException(
-
+                    "BinaryOperator method called on types " + str(type(left)) + "," + str(type(right))
+                )
         elif isinstance(left, numbers.Number):
             if isinstance(right, Objective):
                 return cls.unary_operator(left=right, op=lambda E: op(left, E))
             else:
                 raise TequilaException(
-
+                    "BinaryOperator method called on types " + str(type(left)) + "," + str(type(right))
+                )
         else:
             split_at = len(left.args)
-            return Objective(
-
-
+            return Objective(
+                args=left.args + right.args,
+                transformation=JoinedTransformation(
+                    left=left.transformation, right=right.transformation, split=split_at, op=op
+                ),
+            )

     def wrap(self, op):
         """
@@ -540,12 +558,14 @@ class Objective:

         unique = self.count_expectationvalues(unique=True)
         measurements = self.count_measurements()
-        return
-
-
-
-
-
+        return (
+            "Objective with {} unique expectation values\n"
+            "total measurements = {}\n"
+            "variables = {}\n"
+            "types = {}".format(unique, measurements, variables, types)
+        )
+
+    def __call__(self, variables=None, initial_state=0, *args, **kwargs):
         """
         Return the output of the calculation the objective represents.

@@ -553,6 +573,8 @@
         ----------
         variables: dict:
             dictionary instantiating all variables that may appear within the objective.
+        initial_state: int or QubitWaveFunction:
+            the initial state of the circuit
         args
         kwargs

@@ -565,27 +587,28 @@
         # failsafe
         check_variables = {k: k in variables for k in self.extract_variables()}
         if not all(list(check_variables.values())):
-            raise TequilaException(
-
-
-
-
-
-
-
+            raise TequilaException(
+                "Objective did not receive all variables:\n"
+                "You gave\n"
+                " {}\n"
+                " but the objective depends on\n"
+                " {}\n"
+                " missing values for\n"
+                " {}".format(variables, self.extract_variables(), [k for k, v in check_variables.items() if not v])
+            )

         # avoid multiple evaluations
         evaluated = {}
         ev_array = []
         for E in self.args:
             if E not in evaluated: #
-                expval_result = E(variables=variables, *args, **kwargs)
+                expval_result = E(variables=variables, initial_state=initial_state, *args, **kwargs)
                 evaluated[E] = expval_result
             else:
                 expval_result = evaluated[E]
             try:
                 expval_result = float(expval_result)
-            except:
+            except Exception:
                 pass # allow array evaluation (non-standard operation)
             ev_array.append(expval_result)
         result = onp.asarray(self.transformation(*ev_array), dtype=float)
@@ -595,7 +618,7 @@
             return float(result[0])
         else:
             return result
-
+
     def contract(self):
         """
         Exists only to be convient in optimizers, which all contract over VectrObjectives.
@@ -608,19 +631,20 @@

     def __len__(self):
         return 1
-
+
     def is_translated(self):
         """
         check if the objective was already translated to a quantum backend
         """
         types = [type(E) for E in self.get_expectationvalues()]
         types = list(set(types))
-        if len(types)==0 or (ExpectationValueImpl in types and len(types)==1):
+        if len(types) == 0 or (ExpectationValueImpl in types and len(types) == 1):
             return False
         else:
             return True

-
+
+def ExpectationValue(U, H, optimize_measurements=False, *args, **kwargs) -> Objective:
     """
     Initialize an Objective which is just a single expectationvalue
     """
@@ -758,8 +782,10 @@ class Variable:
             new = Objective(args=[self, other], transformation=op)
         else:
             raise TequilaException(
-                "unknown type in left_helper of objective arithmetics with operation {}: {}".format(
-
+                "unknown type in left_helper of objective arithmetics with operation {}: {}".format(
+                    type(op), type(other)
+                )
+            )
         return new

     def _right_helper(self, op, other):
@@ -779,8 +805,10 @@ class Variable:
             new = Objective(args=[other, self], transformation=op)
         else:
             raise TequilaException(
-                "unknown type in left_helper of objective arithmetics with operation {}: {}".format(
-
+                "unknown type in left_helper of objective arithmetics with operation {}: {}".format(
+                    type(op), type(other)
+                )
+            )
         return new

     def __mul__(self, other):
@@ -796,7 +824,7 @@ class Variable:
         return self._left_helper(numpy.true_divide, other)

     def __neg__(self):
-        return Objective(args=[self], transformation=lambda v: numpy.multiply(v, -1.))
+        return Objective(args=[self], transformation=lambda v: numpy.multiply(v, -1.0))

     def __pow__(self, other):
         return self._left_helper(numpy.power, other)
@@ -815,7 +843,7 @@ class Variable:

     def __invert__(self):
         new = Objective(args=[self])
-        return new
+        return new**-1.0

     def __len__(self):
         return 1
@@ -827,7 +855,7 @@ class Variable:
         return True

     def apply(self, other):
-        assert
+        assert callable(other)
         return Objective(args=[self], transformation=other)

     def wrap(self, other):
@@ -838,6 +866,7 @@ class Variable:

     def toJson(self):
         import json
+
         return json.dumps(self, default=lambda o: o.__dict__)


@@ -857,15 +886,16 @@ class FixedVariable(float):
         return self

     def apply(self, other):
-        assert
+        assert callable(other)
         return Objective(args=[self], transformation=other)

     def wrap(self, other):
         return self.apply(other)
-
+
     def map_variables(self, *args, **kwargs):
         return self

+
 def format_variable_list(variables: typing.List[typing.Hashable]) -> typing.List[Variable]:
     """
     Convenience functions to assign tequila variables.
@@ -885,8 +915,9 @@ def format_variable_list(variables: typing.List[typing.Hashable]) -> typing.List
     return [assign_variable(k) for k in variables]


-def format_variable_dictionary(
-
+def format_variable_dictionary(
+    variables: typing.Dict[typing.Hashable, typing.Any],
+) -> typing.Dict[Variable, typing.Any]:
     """
     Convenience function to assign tequila variables.
     Parameters
@@ -905,8 +936,9 @@ def format_variable_dictionary(variables: typing.Dict[typing.Hashable, typing.An
     return Variables(variables)


-def assign_variable(
-    Variable, FixedVariable]
+def assign_variable(
+    variable: typing.Union[typing.Hashable, numbers.Real, Variable, FixedVariable],
+) -> typing.Union[Variable, FixedVariable]:
     """
     Convenience function; maps various objects into Variable, FixedVariable, or Variables, for easy duck-typing.

@@ -917,7 +949,7 @@ def assign_variable(variable: typing.Union[typing.Hashable, numbers.Real, Variab

     Raises
     ------
-
+
     TequilaVariableException


@@ -942,8 +974,11 @@ def assign_variable(variable: typing.Union[typing.Hashable, numbers.Real, Variab
         return Variable(name=variable)
     else:
         raise TequilaVariableException(
-            "Only hashable types can be assigned to Variables. You passed down "
-
+            "Only hashable types can be assigned to Variables. You passed down "
+            + str(variable)
+            + " type="
+            + str(type(variable))
+        )


 class Variables(collections.abc.MutableMapping):
@@ -985,5 +1020,5 @@ class Variables(collections.abc.MutableMapping):
         return result

     def __repr__(self):
-        xdict = {k:v for k,v in self.items()}
+        xdict = {k: v for k, v in self.items()}
         return xdict.__repr__()
tequila/objective/qtensor.py
CHANGED
@@ -1,7 +1,9 @@
-import numpy
+import numpy
+import typing
 from .objective import Objective, ExpectationValueImpl, format_variable_dictionary
 from tequila import TequilaException

+
 class QTensor(numpy.ndarray):
     # see here: https://numpy.org/devdocs/user/basics.subclassing.html

@@ -15,17 +17,17 @@ class QTensor(numpy.ndarray):
         # do all-zero initialization
         shape = self.shape
         if objective_list is None:
-            with numpy.nditer(self, flags
+            with numpy.nditer(self, flags=["refs_ok"], op_flags=["readwrite"]) as it:
                 for x in it:
                     x[...] = Objective()
         else:
-            j=0
-            with numpy.nditer(self, flags
+            j = 0
+            with numpy.nditer(self, flags=["refs_ok"], op_flags=["readwrite"]) as it:
                 for x in it:
                     x[...] = objective_list[j]
-                    j=j+1
+                    j = j + 1

-    def __call__(self,variables=None, *args, **kwargs):
+    def __call__(self, variables=None, *args, **kwargs):
         """
         Return the output of the calculation the objective represents.

@@ -45,13 +47,15 @@ class QTensor(numpy.ndarray):
         # failsafe
         check_variables = {k: k in variables for k in self.extract_variables()}
         if not all(list(check_variables.values())):
-            raise TequilaException(
-
-
-
-
-
-
+            raise TequilaException(
+                "Objective did not receive all variables:\n"
+                "You gave\n"
+                " {}\n"
+                " but the objective depends on\n"
+                " {}\n"
+                " missing values for\n"
+                " {}".format(variables, self.extract_variables(), [k for k, v in check_variables.items() if not v])
+            )

         # avoid multiple evaluations
         evaluated = {}
@@ -60,7 +64,7 @@ class QTensor(numpy.ndarray):
         for obj in newtensor:
             a = obj(variables=variables, *args, **kwargs)
             ev_array.append(a)
-        ev_array = numpy.reshape(ev_array,self.shape)
+        ev_array = numpy.reshape(ev_array, self.shape)
         if ev_array.shape == ():
             return float(ev_array)
         elif len(ev_array) == 1:
@@ -73,11 +77,11 @@ class QTensor(numpy.ndarray):
         _fn = numpy.vectorize(_f)
         return _fn(self)

-    def extract_variables(self)->list:
+    def extract_variables(self) -> list:
         newtensor = self.flatten()
         unique = []
         for obj in newtensor:
-            if hasattr(obj,
+            if hasattr(obj, "extract_variables"):
                 var_list = obj.extract_variables()
                 for j in var_list:
                     if j not in unique:
@@ -94,7 +98,7 @@ class QTensor(numpy.ndarray):
         newtensor = self.flatten()
         expvals = []
         for obj in newtensor:
-            if hasattr(obj,
+            if hasattr(obj, "get_expectationvalues"):
                 expvals += obj.get_expectationvalues()
         return expvals

@@ -129,7 +133,7 @@ class QTensor(numpy.ndarray):
         return len(self.get_expectationvalues())

     def __repr__(self):
-        _repmat = numpy.empty(self.shape,dtype
+        _repmat = numpy.empty(self.shape, dtype=object)
         _repmat = _repmat.flatten()
         newtensor = self.flatten()
         for i in range(len(newtensor)):
@@ -144,7 +148,7 @@ class QTensor(numpy.ndarray):
         newtensor = self.flatten()
         types = []
         for obj in newtensor:
-            if hasattr(obj,
+            if hasattr(obj, "get_expectationvalues"):
                 _types = [type(E) for E in obj.get_expectationvalues()]
                 for tt in _types:
                     types.append(tt)
@@ -157,20 +161,21 @@ class QTensor(numpy.ndarray):

         unique = self.count_expectationvalues(unique=True)
         measurements = self.count_measurements()
-        return
-
-
-
+        return (
+            "QTensor of shape {} with {} unique expectation values\n"
+            "total measurements = {}\n"
+            "variables = {}\n"
+            "types = {}".format(self.shape, unique, measurements, variables, types)
+        )

     def contract(self):
         newtensor = self.flatten()
-        out_array=[obj for obj in newtensor]
+        out_array = [obj for obj in newtensor]
         summed = out_array[0]
         for entry in out_array[1:]:
             summed += entry
         return summed

-
     class HelperObject:
         """
         This is a small helper object class for tequila objectives
@@ -178,17 +183,21 @@ class QTensor(numpy.ndarray):
         create if like this:
         ff = HelperObject(func=f) where f is the function you want to apply later (e.g. numpy.sin)
         use if like this with tequila objectives
-        f_on_objective = ff(objective)
+        f_on_objective = ff(objective)
         """
+
         def __init__(self, func):
             self.func = func
+
         def __call__(self, objective):
             return objective.apply(self.func)

+
 # ------------------------------------------------------
 # backward compatibility with old VectorObjective class
 # ------------------------------------------------------

+
 def vectorize(objectives):
     """
     Combine several objectives in order, into one longer vector.
@@ -206,6 +215,7 @@ def vectorize(objectives):
     """
     return QTensor(objective_list=objectives, shape=(len(objectives),))

+
 def VectorObjective(argsets: typing.Iterable = None, transformations: typing.Iterable[callable] = None):
     if argsets is None:
         return QTensor()
@@ -219,4 +229,3 @@ def VectorObjective(argsets: typing.Iterable = None, transformations: typing.Ite
         objective_list.append(Objective(args=argsets[i], transformation=transformations[i]))

     return vectorize(objectives=objective_list)
-
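For orientation, a short sketch of the `vectorize`/`QTensor` helpers touched above, restricted to methods visible in this diff (`extract_variables`, `count_expectationvalues`, `contract`); the two objectives are placeholders.

```python
# Sketch only: E1/E2 are placeholder objectives; the import path follows the
# file location tequila/objective/qtensor.py shown in this diff.
import tequila as tq
from tequila.objective.qtensor import vectorize

a = tq.Variable("a")
E1 = tq.ExpectationValue(U=tq.gates.Ry(angle=a, target=0), H=tq.paulis.Z(0))
E2 = tq.ExpectationValue(U=tq.gates.Rx(angle=a, target=0), H=tq.paulis.X(0))

V = vectorize([E1, E2])              # QTensor of shape (2,)
print(V.extract_variables())         # variables collected over all entries
print(V.count_expectationvalues())   # expectation values contained in the tensor
summed = V.contract()                # sum of all entries as a single Objective
```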
tequila/optimizers/__init__.py
CHANGED
@@ -6,7 +6,9 @@ from tequila.optimizers.optimizer_gd import minimize as minimize_gd
 from tequila.simulators.simulator_api import simulate
 from dataclasses import dataclass

-import typing
+import typing
+import numbers
+import numpy


 @dataclass
@@ -16,27 +18,26 @@ class _Optimizers:
     methods: list = None


-SUPPORTED_OPTIMIZERS = [
+SUPPORTED_OPTIMIZERS = ["scipy", "gpyopt", "gd"]
 INSTALLED_OPTIMIZERS = {}
-INSTALLED_OPTIMIZERS[
-
-
-INSTALLED_OPTIMIZERS[
-    minimize=minimize_gd,
-    methods=OptimizerGD.available_methods())
+INSTALLED_OPTIMIZERS["scipy"] = _Optimizers(
+    cls=OptimizerSciPy, minimize=minimize_scipy, methods=OptimizerSciPy.available_methods()
+)
+INSTALLED_OPTIMIZERS["gd"] = _Optimizers(cls=OptimizerGD, minimize=minimize_gd, methods=OptimizerGD.available_methods())

 has_gpyopt = False
 try:
     from tequila.optimizers.optimizer_gpyopt import OptimizerGPyOpt
     from tequila.optimizers.optimizer_gpyopt import minimize as minimize_gpyopt

-    INSTALLED_OPTIMIZERS[
-
-
+    INSTALLED_OPTIMIZERS["gpyopt"] = _Optimizers(
+        cls=OptimizerGPyOpt, minimize=minimize_gpyopt, methods=OptimizerGPyOpt.available_methods()
+    )
     has_gpyopt = True
 except ImportError:
     has_gpyopt = False

+
 def show_available_optimizers(module=None):
     """
     Returns
@@ -65,13 +66,15 @@ def show_available_optimizers(module=None):
     print("Installed optimizer modules: ", list(INSTALLED_OPTIMIZERS.keys()))


-def minimize(
-
-
-
-
-
-
+def minimize(
+    objective,
+    method: str = "bfgs",
+    variables: list = None,
+    initial_values: typing.Union[dict, numbers.Number, typing.Callable] = 0.0,
+    maxiter: int = None,
+    *args,
+    **kwargs,
+):
     """

     Parameters
@@ -115,10 +118,12 @@ def minimize(objective,

     """

-    ovtmp=objective.extract_variables()
-    fast_return=False
+    ovtmp = objective.extract_variables()
+    fast_return = False
     if ovtmp is None or len(ovtmp) == 0:
-        return OptimizerResults(
+        return OptimizerResults(
+            energy=float(simulate(objective, *args, **kwargs)), variables={}, history=OptimizerHistory()
+        )

     for k, v in INSTALLED_OPTIMIZERS.items():
         if method.lower() in v.methods or method.upper() in v.methods:
@@ -128,7 +133,10 @@ def minimize(objective,
                 variables=variables,
                 initial_values=initial_values,
                 maxiter=maxiter,
-                *args,
+                *args,
+                **kwargs,
+            )

     raise TequilaOptimizerException(
-        "Could not find optimization method {} in tequila optimizers. You might miss dependencies"
+        "Could not find optimization method {} in tequila optimizers. You might miss dependencies"
+    )
tequila/optimizers/_containers.py
CHANGED
@@ -2,12 +2,12 @@ import numpy
 from tequila.objective import format_variable_dictionary
 from tequila.tools.qng import evaluate_qng
 import sys
+
 """
 Define Containers for SciPy usage
 """


-
 class _EvalContainer:
     """
     Container Class to access scipy and keep the optimization history.
@@ -35,8 +35,9 @@ class _EvalContainer:

     """

-    def __init__(
-
+    def __init__(
+        self, objective, param_keys, passive_angles=None, samples=None, save_history=True, print_level: int = 3
+    ):
         self.objective = objective
         self.samples = samples
         self.param_keys = param_keys
@@ -135,11 +136,14 @@ class _QngContainer(_EvalContainer):
     evaluate the qng.
     """

-
     def __init__(self, combos, param_keys, passive_angles=None, samples=None, save_history=True):
-
-
-
+        super().__init__(
+            objective=None,
+            param_keys=param_keys,
+            passive_angles=passive_angles,
+            samples=samples,
+            save_history=save_history,
+        )

         self.combos = combos
