eqc-models 0.9.8-py3-none-any.whl → 0.10.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- eqc_models-0.10.0.data/platlib/compile_extensions.py +67 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/assignment/setpartition.py +8 -29
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/base/polyeval.c +127 -123
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/base/polynomial.py +84 -1
- eqc_models-0.10.0.data/platlib/eqc_models/base.py +115 -0
- eqc_models-0.10.0.data/platlib/eqc_models/combinatorics/setcover.py +93 -0
- eqc_models-0.10.0.data/platlib/eqc_models/communitydetection.py +25 -0
- eqc_models-0.10.0.data/platlib/eqc_models/eqcdirectsolver.py +61 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/graph/base.py +28 -17
- eqc_models-0.10.0.data/platlib/eqc_models/graph/partition.py +148 -0
- eqc_models-0.10.0.data/platlib/eqc_models/graphs.py +28 -0
- eqc_models-0.10.0.data/platlib/eqc_models/maxcut.py +113 -0
- eqc_models-0.10.0.data/platlib/eqc_models/maxkcut.py +185 -0
- eqc_models-0.10.0.data/platlib/eqc_models/ml/classifierqboost.py +628 -0
- eqc_models-0.10.0.data/platlib/eqc_models/ml/cvqboost_hamiltonian.pyx +83 -0
- eqc_models-0.10.0.data/platlib/eqc_models/ml/cvqboost_hamiltonian_c_func.c +68 -0
- eqc_models-0.10.0.data/platlib/eqc_models/ml/cvqboost_hamiltonian_c_func.h +14 -0
- eqc_models-0.10.0.data/platlib/eqc_models/quadraticmodel.py +131 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/sequence/tsp.py +38 -34
- eqc_models-0.10.0.data/platlib/eqc_models/solvers/eqcdirect.py +160 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/solvers/qciclient.py +46 -11
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/utilities/polynomial.py +11 -0
- {eqc_models-0.9.8.dist-info → eqc_models-0.10.0.dist-info}/METADATA +3 -2
- eqc_models-0.10.0.dist-info/RECORD +65 -0
- {eqc_models-0.9.8.dist-info → eqc_models-0.10.0.dist-info}/WHEEL +1 -1
- eqc_models-0.9.8.data/platlib/compile_extensions.py +0 -23
- eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierqboost.py +0 -423
- eqc_models-0.9.8.dist-info/RECORD +0 -52
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/algorithms/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/algorithms/base.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/algorithms/penaltymultiplier.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/allocation/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/allocation/allocation.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/allocation/portbase.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/allocation/portmomentum.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/assignment/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/assignment/qap.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/base/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/base/base.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/base/constraints.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/base/operators.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/base/polyeval.pyx +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/base/quadratic.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/decoding.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/graph/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/graph/hypergraph.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/graph/maxcut.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/graph/maxkcut.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/classifierbase.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/classifierqsvm.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/clustering.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/clusteringbase.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/decomposition.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/forecast.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/forecastbase.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/regressor.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/regressorbase.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/ml/reservoir.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/sequence/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/solvers/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/utilities/__init__.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/utilities/fileio.py +0 -0
- {eqc_models-0.9.8.data → eqc_models-0.10.0.data}/platlib/eqc_models/utilities/qplib.py +0 -0
- {eqc_models-0.9.8.dist-info → eqc_models-0.10.0.dist-info}/LICENSE.txt +0 -0
- {eqc_models-0.9.8.dist-info → eqc_models-0.10.0.dist-info}/top_level.txt +0 -0
eqc_models-0.10.0.data/platlib/eqc_models/maxcut.py

@@ -0,0 +1,113 @@
+import networkx as nx
+import numpy as np
+from .graphs import TwoPartitionModel
+
+class MaxCutModel(TwoPartitionModel):
+
+    def build(self):
+        variables = self.variables
+        n = len(variables)
+        self.domains = np.ones((n,))
+
+        J = np.zeros((n+1, n+1), dtype=np.float32)
+        h = np.zeros((n+1,1), dtype=np.float32)
+        for u, v in G.edges:
+            J[u, v] += 1
+            J[v, u] += 1
+            J[u, u] = 1
+            J[v, v] = 1
+            h[u] -= 1
+            h[v] -= 1
+        J *= 1/t**2
+        h *= 1/t
+        H = np.hstack([h, J])
+        return H
+
+    @property
+    def J(self) -> np.ndarray:
+        if getattr(self, "_J", None) is None:
+            self.build()
+        return self._J
+
+    @property
+    def C(self) -> np.ndarray:
+        if getattr(self, "C", None) is None:
+            self.build()
+        return self._C
+
+def get_graph(n, d):
+    """ Produce a repeatable graph with parameters n and d """
+
+    seed = n * d
+    return nx.random_graphs.random_regular_graph(d, n, seed)
+
+def get_partition_graph(G, solution):
+    """
+    Build the partitioned graph, counting cut size
+
+    :parameters: G : nx.DiGraph, solution : np.ndarray
+    :returns: nx.DiGraph, int
+
+    """
+
+    cut_size = 0
+    Gprime = nx.DiGraph()
+    Gprime.add_nodes_from(G.nodes)
+    for i, j in G.edges:
+        if solution[i] != solution[j]:
+            cut_size+=1
+        else:
+            Gprime.add_edge(i, j)
+    return Gprime, cut_size
+
+def determine_solution(G, solution):
+    """
+    Use a simple bisection method to determine the binary solution. Uses
+    the cut size as the metric.
+
+    Returns the partitioned graph and solution.
+
+    :parameters: G : nx.DiGraph, solution : np.ndarray
+    :returns: nx.DiGraph, np.ndarray
+
+    """
+
+    solution = np.array(solution)
+    lower = np.min(solution)
+    upper = np.max(solution)
+    best_cut_size = 0
+    best_graph = G
+    best_solution = None
+    while upper > lower + 0.0001:
+        middle = (lower + upper) / 2
+        test_solution = (solution>=middle).astype(np.int32)
+        Gprime, cut_size = get_partition_graph(G, test_solution)
+        if cut_size > best_cut_size:
+            best_cut_size = cut_size
+            lower = middle
+            best_solution = test_solution
+            best_graph = Gprime
+        else:
+            upper = middle
+    return best_graph, best_solution
+
+def get_maxcut_H(G, t):
+    """
+    Return a Hamiltonian representing the Maximum Cut Problem. Scale the problem using `t`.
+    Automatically adds a slack qudit.
+
+    """
+    n = len(G.nodes)
+    J = np.zeros((n+1, n+1), dtype=np.float32)
+    h = np.zeros((n+1,1), dtype=np.float32)
+    for u, v in G.edges:
+        J[u, v] += 1
+        J[v, u] += 1
+        J[u, u] = 1
+        J[v, v] = 1
+        h[u] -= 1
+        h[v] -= 1
+    J *= 1/t**2
+    h *= 1/t
+    H = np.hstack([h, J])
+    return H
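The new eqc_models/maxcut.py module above pairs a Hamiltonian builder (get_maxcut_H, which returns the [h | J] matrix with a slack qudit) with a bisection decoder (determine_solution). The following is a minimal sketch of how those module-level helpers might be chained together; the random vector is only a stand-in for a sample returned by one of the eqc_models solvers, which this sketch does not call.

import numpy as np
import networkx as nx

from eqc_models.maxcut import get_maxcut_H, determine_solution

# Repeatable 3-regular test graph with integer node labels 0..15.
G = nx.random_regular_graph(3, 16, seed=48)

# Hamiltonian in [h | J] form, scaled by t; one slack qudit is appended.
H = get_maxcut_H(G, t=16.0)

# Placeholder for a solver sample: one continuous value per node.
# A real workflow would obtain this from an eqc_models solver instead.
raw = np.random.default_rng(7).random(len(G.nodes))

# Bisection over cutoffs, keeping the threshold with the largest cut size.
Gprime, binary = determine_solution(G, raw)
cut_size = G.number_of_edges() - Gprime.number_of_edges()
print("cut size:", cut_size, "assignment:", binary)

Because get_partition_graph keeps only the uncut edges, the cut size can be read off as the difference in edge counts between the input graph and the partitioned graph.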
eqc_models-0.10.0.data/platlib/eqc_models/maxkcut.py

@@ -0,0 +1,185 @@
+import numpy as np
+import networkx as nx
+from .quadraticmodel import QuadraticModel
+
+
+class MaxKCut(QuadraticModel):
+
+    def __init__(self, G : nx.Graph, k : int):
+        self.G = G
+        self.node_map = list(G.nodes)
+        self.k = k
+        self.partitions = []
+        self._lhs = None
+        self._rhs = None
+        self._objective = None
+        self._J = None
+        self._C = None
+
+    def decode(self, solution: np.ndarray) -> np.ndarray:
+        """ Override the default decoding to use a the max cut metric to determine a solution """
+
+        # only one partition per node can be selected
+        # rather than the same cutoff per node, use the max value per partition
+        decoded_solution = np.zeros_like(solution, dtype=np.int32)
+        k = self.k
+        for i, u in enumerate(self.node_map):
+            idx = slice(k*i, k*(i+1))
+            spins = solution[idx]
+            mx = np.max(spins)
+            for j in range(k):
+                if spins[j] == mx:
+                    decoded_solution[k*i+j] = 1
+                    break
+        return decoded_solution
+
+    def partition(self, solution):
+        """ Return a dictionary with the partition number of each node """
+        k = self.k
+        n = len(self.node_map)
+        partition_num = {}
+        for i, u in enumerate(self.node_map):
+            for j in range(k):
+                if solution[i*k+j] == 1:
+                    partition_num[u] = j+1
+        return partition_num
+
+    def getCutSize(self, partition):
+        cut_size = 0
+        for u, v in self.G.edges:
+            if partition[u]!=partition[v]:
+                cut_size += 1
+        return cut_size
+
+    def _build_objective(self):
+
+        node_map = self.node_map
+        G = self.G
+        m = len(G.nodes)
+        n = self.k * m
+        # construct the quadratic portion of the objective
+        # the linear portion is 0
+        objective = np.zeros((n, n), dtype=np.float32)
+        # increment the joint variable terms indicating the nodes are in different sets
+        pairs = [(i, j) for i in range(self.k) for j in range(self.k) if i!=j]
+        for u, v in G.edges:
+            i = node_map.index(u)
+            j = node_map.index(v)
+            ibase = i * self.k
+            jbase = j * self.k
+            for incr1, incr2 in pairs:
+                idx1 = ibase + incr1
+                idx2 = jbase + incr2
+                objective[idx1, idx2] += -1
+        self._objective = (np.zeros((n, 1)), objective)
+
+    def _build_constraints(self):
+
+        node_map = self.node_map
+        G = self.G
+        m = len(G.nodes)
+        n = self.k * m
+
+        # build the constraints
+        A = np.zeros((m, n))
+        b = np.ones((m,))
+        for u in G.nodes:
+            i = node_map.index(u)
+            ibase = i * self.k
+            A[i, ibase:ibase+self.k] = 1
+        self._lhs = A
+        self._rhs = b
+
+    def build(self, multiplier=None):
+        """ Create the constraints and objective and Hamiltonian """
+
+        # there are k * m variables in this problem where m is the number of nodes in the graph
+        node_map = self.node_map
+        G = self.G
+        m = len(G.nodes)
+        n = self.k * m
+        self.domains = np.ones((n,))
+
+        self._build_objective()
+        if multiplier is None:
+            multiplier = np.max(np.abs(self._objective[1]))
+        self._build_constraints()
+
+        self._C, self._J = self.buildH(multiplier)
+        self.sum_constraint = m
+
+    def buildH(self, multiplier):
+        """ Combine the objective and penalties using the multiplier """
+
+        objC, objJ = self.objective
+        lhs, rhs = self.constraints
+        Pq = lhs.T@lhs
+        Pl = -2 * rhs.T@lhs
+        offset = rhs.T@rhs
+        n = self.n
+        J = np.zeros((n, n), np.float32)
+        C = np.zeros([n, 1], np.float32)
+        C += objC
+        J[:,:] += objJ
+        C += multiplier * Pl.reshape((n, 1))
+        J[:,:] += multiplier * Pq
+        return C, J
+
+    @property
+    def constraints(self):
+        """ Return LHS, RHS in numpy matrix format """
+        if self._rhs is None:
+            self.build()
+        return self._lhs, self._rhs
+
+    @property
+    def objective(self):
+        """ Return the quadratic objective as NxN+1 matrix """
+
+        if self._objective is None:
+            self.build()
+        return self._objective
+
+    @property
+    def H(self):
+        """ Return the Hamiltonian as parts C, J """
+
+        if self._C is None:
+            self.build()
+        return self._C, self._J
+
+class WeightedMaxKCut(MaxKCut):
+
+    def __init__(self, G: nx.Graph, k: int, weight_label : str = "weight"):
+        super().__init__(G, k)
+
+        self.weight_label = weight_label
+
+    def _build_objective(self):
+
+        node_map = self.node_map
+        G = self.G
+        m = len(G.nodes)
+        n = self.k * m
+        # construct the quadratic portion of the objective
+        # the linear portion is 0
+        objective = np.zeros((n, n), dtype=np.float32)
+        # increment the joint variable terms indicating the nodes are in different sets
+        pairs = [(i, j) for i in range(self.k) for j in range(self.k) if i!=j]
+        for u, v in G.edges:
+            i = node_map.index(u)
+            j = node_map.index(v)
+            ibase = i * self.k
+            jbase = j * self.k
+            for incr1, incr2 in pairs:
+                idx1 = ibase + incr1
+                idx2 = jbase + incr2
+                objective[idx1, idx2] += G[u][v][self.weight_label]
+        self._objective = (np.zeros((n, 1)), objective)
+
+    def getCutSize(self, partition):
+        cut_size = 0
+        for u, v in self.G.edges:
+            if partition[u]!=partition[v]:
+                cut_size += self.G[u][v][self.weight_label]
+        return cut_size
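The maxkcut.py module above encodes max k-cut with k one-hot variables per node, a quadratic objective over edges, and one-hot constraints folded in as penalties by buildH. Below is a minimal sketch of driving MaxKCut end to end, assuming the QuadraticModel base class added in this release supplies the members that build() relies on (for example self.n and the sum_constraint setter); as before, the random vector merely stands in for a solver sample.

import numpy as np
import networkx as nx

from eqc_models.maxkcut import MaxKCut

# Partition a 6-cycle into k = 3 sets.
G = nx.cycle_graph(6)
model = MaxKCut(G, k=3)

# Builds the objective, one-hot constraints, and penalized Hamiltonian (C, J).
# The penalty multiplier defaults to the largest absolute objective coefficient.
model.build()
C, J = model.H

# Placeholder for a solver sample: one value per (node, partition) variable.
raw = np.random.default_rng(3).random(len(G.nodes) * 3)

assignment = model.decode(raw)        # one-hot partition choice per node
labels = model.partition(assignment)  # {node: partition number 1..k}
print("cut size:", model.getCutSize(labels))

WeightedMaxKCut follows the same pattern, reading edge weights through weight_label both when building the objective and when reporting the cut size.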