eqc-models 0.9.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. eqc_models-0.9.8.data/platlib/compile_extensions.py +23 -0
  2. eqc_models-0.9.8.data/platlib/eqc_models/__init__.py +15 -0
  3. eqc_models-0.9.8.data/platlib/eqc_models/algorithms/__init__.py +4 -0
  4. eqc_models-0.9.8.data/platlib/eqc_models/algorithms/base.py +10 -0
  5. eqc_models-0.9.8.data/platlib/eqc_models/algorithms/penaltymultiplier.py +169 -0
  6. eqc_models-0.9.8.data/platlib/eqc_models/allocation/__init__.py +6 -0
  7. eqc_models-0.9.8.data/platlib/eqc_models/allocation/allocation.py +367 -0
  8. eqc_models-0.9.8.data/platlib/eqc_models/allocation/portbase.py +128 -0
  9. eqc_models-0.9.8.data/platlib/eqc_models/allocation/portmomentum.py +137 -0
  10. eqc_models-0.9.8.data/platlib/eqc_models/assignment/__init__.py +5 -0
  11. eqc_models-0.9.8.data/platlib/eqc_models/assignment/qap.py +82 -0
  12. eqc_models-0.9.8.data/platlib/eqc_models/assignment/setpartition.py +170 -0
  13. eqc_models-0.9.8.data/platlib/eqc_models/base/__init__.py +72 -0
  14. eqc_models-0.9.8.data/platlib/eqc_models/base/base.py +150 -0
  15. eqc_models-0.9.8.data/platlib/eqc_models/base/constraints.py +276 -0
  16. eqc_models-0.9.8.data/platlib/eqc_models/base/operators.py +201 -0
  17. eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.c +11363 -0
  18. eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
  19. eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.pyx +72 -0
  20. eqc_models-0.9.8.data/platlib/eqc_models/base/polynomial.py +274 -0
  21. eqc_models-0.9.8.data/platlib/eqc_models/base/quadratic.py +250 -0
  22. eqc_models-0.9.8.data/platlib/eqc_models/decoding.py +20 -0
  23. eqc_models-0.9.8.data/platlib/eqc_models/graph/__init__.py +5 -0
  24. eqc_models-0.9.8.data/platlib/eqc_models/graph/base.py +63 -0
  25. eqc_models-0.9.8.data/platlib/eqc_models/graph/hypergraph.py +307 -0
  26. eqc_models-0.9.8.data/platlib/eqc_models/graph/maxcut.py +155 -0
  27. eqc_models-0.9.8.data/platlib/eqc_models/graph/maxkcut.py +184 -0
  28. eqc_models-0.9.8.data/platlib/eqc_models/ml/__init__.py +15 -0
  29. eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierbase.py +99 -0
  30. eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierqboost.py +423 -0
  31. eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierqsvm.py +237 -0
  32. eqc_models-0.9.8.data/platlib/eqc_models/ml/clustering.py +323 -0
  33. eqc_models-0.9.8.data/platlib/eqc_models/ml/clusteringbase.py +112 -0
  34. eqc_models-0.9.8.data/platlib/eqc_models/ml/decomposition.py +363 -0
  35. eqc_models-0.9.8.data/platlib/eqc_models/ml/forecast.py +255 -0
  36. eqc_models-0.9.8.data/platlib/eqc_models/ml/forecastbase.py +139 -0
  37. eqc_models-0.9.8.data/platlib/eqc_models/ml/regressor.py +220 -0
  38. eqc_models-0.9.8.data/platlib/eqc_models/ml/regressorbase.py +97 -0
  39. eqc_models-0.9.8.data/platlib/eqc_models/ml/reservoir.py +106 -0
  40. eqc_models-0.9.8.data/platlib/eqc_models/sequence/__init__.py +5 -0
  41. eqc_models-0.9.8.data/platlib/eqc_models/sequence/tsp.py +217 -0
  42. eqc_models-0.9.8.data/platlib/eqc_models/solvers/__init__.py +12 -0
  43. eqc_models-0.9.8.data/platlib/eqc_models/solvers/qciclient.py +707 -0
  44. eqc_models-0.9.8.data/platlib/eqc_models/utilities/__init__.py +6 -0
  45. eqc_models-0.9.8.data/platlib/eqc_models/utilities/fileio.py +38 -0
  46. eqc_models-0.9.8.data/platlib/eqc_models/utilities/polynomial.py +137 -0
  47. eqc_models-0.9.8.data/platlib/eqc_models/utilities/qplib.py +375 -0
  48. eqc_models-0.9.8.dist-info/LICENSE.txt +202 -0
  49. eqc_models-0.9.8.dist-info/METADATA +139 -0
  50. eqc_models-0.9.8.dist-info/RECORD +52 -0
  51. eqc_models-0.9.8.dist-info/WHEEL +5 -0
  52. eqc_models-0.9.8.dist-info/top_level.txt +2 -0
eqc_models-0.9.8.data/platlib/eqc_models/graph/hypergraph.py
@@ -0,0 +1,307 @@
+ import numpy as np
+ import pandas as pd
+ from typing import List, Tuple, Dict, Union
+ from collections import defaultdict
+ from eqc_models.base import ConstraintsMixIn, PolynomialModel
+ from eqc_models.base.operators import Polynomial
+
+
+ class HypergraphModel(ConstraintsMixIn, PolynomialModel):
+     """
+     HypergraphModel represents a flexible model for constructing and preparing hypergraph-based
+     polynomial optimization problems for use with solvers serviced through eqc-models.
+
+     Parameters
+     ----------
+     data : List of Lists, Dictionary, Nested Dictionary, np.ndarray, or pd.DataFrame
+         The hypergraph data representing terms and relationships among nodes. Supported formats:
+
+         - List of lists: each sublist represents a hyperedge with nodes as elements.
+         - Dictionary of tuples: each key is a unique term identifier, with values as tuples of nodes.
+         - Nested dictionary: supports detailed attribute descriptions per node in each term.
+         - 2D np.ndarray: each row represents a relationship with two elements, where the first
+           is the hyperedge and the second is the node.
+         - pd.DataFrame: the first two columns are assumed to be edges and nodes, with an optional
+           'weight' column specifying weights for incidences.
+
+     lhs : np.ndarray, optional
+         Left-hand side matrix for linear constraints in penalty terms.
+     rhs : np.ndarray, optional
+         Right-hand side vector for linear constraints in penalty terms.
+     alpha : float, optional
+         Multiplier for penalties associated with linear constraints; default is 1.0.
+
+     Attributes
+     ----------
+     H : tuple of arrays
+         Polynomial coefficients and indices for the Hamiltonian representation of the problem.
+
+     penalty_multiplier : float
+         Weighting for penalties formed from linear constraints, which scales the penalty terms.
+
+     polynomial : eqc_models.base.operators.Polynomial
+         Polynomial operator representation of the problem terms.
+
+     qubo : eqc_models.base.operators.QUBO
+         QUBO operator representation if quadratic constraints are required.
+
+     dynamic_range : float
+         Dynamic range of the polynomial coefficients, measured in decibels.
+
+     This class provides a model for representing hypergraph-based optimization problems with various
+     polynomial terms based on input types. The hypergraph data can include hyperedges of arbitrary order,
+     penalties for linear constraints, and flexible terms for encoding multibody interactions.
+
+     Example
+     -------
+     An example of creating a hypergraph model from a list-of-lists input representing hyperedges
+     (terms are left-padded with 0 and sorted, so shorter hyperedges sort first):
+
+     >>> data = [['A', 'B', 'C'], ['A', 'D'], ['C', 'D', 'E']]
+     >>> lhs = np.array([[1, -1, 0, 0, 0], [0, 1, -1, 0, 0]])
+     >>> rhs = np.array([0, 0])
+     >>> model = HypergraphModel(data, lhs=lhs, rhs=rhs, alpha=2.0)
+     >>> model.penalty_multiplier
+     2.0
+     >>> coefficients, indices = model.H
+     >>> coefficients
+     array([1., 1., 1.])
+     >>> indices
+     array([[0, 1, 4],
+            [1, 2, 3],
+            [3, 4, 5]])
+
+     This model can then be used with solvers serviced through eqc-models for optimizing polynomial
+     objectives with hypergraph structure.
+     """
+     def __init__(self, data: Union[List[List], Dict[int, Tuple], Dict[int, Dict[str, Dict[str, Union[str, int]]]],
+                                    np.ndarray, pd.DataFrame], lhs: np.ndarray = None, rhs: np.ndarray = None,
+                  alpha: float = 1.0):
+         # Process data to extract `coefficients` and `indices` for polynomial terms
+         coefficients, indices = self.process_data(data)
+
+         # Initialize PolynomialModel with processed coefficients and indices
+         super().__init__(coefficients, indices)
+
+         # Initialize constraints if provided; the penalty multiplier defaults to alpha
+         if lhs is not None and rhs is not None:
+             self.constraints = (lhs, rhs)
+         self.penalty_multiplier = alpha
+
+     def process_data(self, data: Union[List[List], Dict[int, Tuple], Dict[int, Dict[str, Dict[str, Union[str, int]]]],
+                                        np.ndarray, pd.DataFrame]) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Processes the input data to extract polynomial coefficients and indices for
+         the hypergraph representation.
+
+         Parameters
+         ----------
+         data : Union[List of Lists, Dict, Dict of Dicts, np.ndarray, pd.DataFrame]
+             Input hypergraph data in one of the supported formats, representing terms
+             and relationships among nodes.
+
+         Returns
+         -------
+         Tuple of numpy arrays
+             Two numpy arrays, one of coefficients and one of indices, representing
+             polynomial terms.
+         """
+         if isinstance(data, list):
+             return self._process_list_data(data)
+         elif isinstance(data, dict):
+             return self._process_dict_data(data)
+         elif isinstance(data, np.ndarray) and data.shape[1] == 2:
+             return self._process_ndarray_data(data)
+         elif isinstance(data, pd.DataFrame) and data.shape[1] >= 2:
+             return self._process_dataframe_data(data)
+         else:
+             raise ValueError("Unsupported data type for hypergraph model")
+
+     def _process_list_data(self, data: List[List]) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Converts a list of lists to formatted coefficients and indices.
+
+         Parameters
+         ----------
+         data : List of Lists
+             Each sublist represents a hyperedge with nodes as elements.
+
+         Returns
+         -------
+         Tuple of numpy arrays
+             Coefficients as a 1D array and indices as a 2D array formatted with
+             polynomial terms for each hyperedge.
+         """
+         # Create a unique index for each element across all sublists
+         unique_elements = sorted(set(element for sublist in data for element in sublist))
+         element_to_index = {element: idx + 1 for idx, element in enumerate(unique_elements)}  # 1-based indexing
+
+         indices = []
+         order = max(len(sublist) for sublist in data)
+         for sublist in data:
+             # Convert each element in the sublist to its unique index and sort
+             term_indices = sorted(element_to_index[element] for element in sublist)
+             indices.append([0] * (order - len(sublist)) + term_indices)  # Left-pad shorter terms with 0 per the index format
+
+         indices = sorted(indices)
+         coefficients = np.ones(len(data))  # Default coefficient of 1.0 for each hyperedge term
+         return coefficients, np.array(indices)
+
+     def _process_dict_data(self, data: Dict[int, Tuple[str, ...]]) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Converts a dictionary with tuple values to formatted coefficients and indices.
+
+         Parameters
+         ----------
+         data : Dictionary of tuples
+             Dictionary where each key represents a unique term identifier and values
+             are tuples of nodes forming the term.
+
+         Returns
+         -------
+         Tuple of numpy arrays
+             Coefficients as a 1D array and indices as a 2D array formatted with
+             polynomial terms for each term in the dictionary.
+         """
+         # Create a unique index for each element across all tuples
+         unique_elements = sorted(set(element for elements in data.values() for element in elements))
+         element_to_index = {element: idx + 1 for idx, element in enumerate(unique_elements)}  # 1-based indexing
+
+         indices = []
+         order = max(len(elements) for elements in data.values())
+         for elements in data.values():
+             # Convert each element in the tuple to its unique index and sort
+             term_indices = sorted(element_to_index[element] for element in elements)
+             indices.append([0] * (order - len(elements)) + term_indices)  # Left-pad shorter terms with 0 per the index format
+
+         indices = sorted(indices)
+         coefficients = np.ones(len(data))  # Default coefficient of 1.0 for each term
+         return coefficients, np.array(indices)
+
+     def _process_ndarray_data(self, data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Converts a numpy array to formatted coefficients and indices, assuming
+         shape (N, 2), where each row represents a [Group, Node] pair.
+
+         Parameters
+         ----------
+         data : np.ndarray
+             2D array where each row represents a hyperedge and node relationship.
+
+         Returns
+         -------
+         Tuple of numpy arrays
+             Coefficients as a 1D array with default values, and indices as a 2D array
+             formatted for each group of nodes within each unique hyperedge.
+         """
+         # Group nodes by hyperedge label (first element in each row)
+         grouped_nodes = defaultdict(list)
+         for group_label, node_label in data:
+             grouped_nodes[group_label].append(node_label)
+
+         # Create a unique index for each node label across all hyperedges
+         unique_nodes = sorted(set(node for nodes in grouped_nodes.values() for node in nodes))
+         node_to_index = {node: idx + 1 for idx, node in enumerate(unique_nodes)}  # 1-based indexing
+
+         indices = []
+         order = max(len(nodes) for nodes in grouped_nodes.values())
+         for nodes in grouped_nodes.values():
+             # Convert each node in the hyperedge to its unique index and sort
+             term_indices = sorted(node_to_index[node] for node in nodes)
+             indices.append([0] * (order - len(nodes)) + term_indices)  # Left-pad shorter terms with 0 per the index format
+
+         indices = sorted(indices)
+         coefficients = np.ones(len(indices))  # Default coefficient of 1.0 for each hyperedge term
+         return coefficients, np.array(indices)
+
+     def _process_dataframe_data(self, data: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Converts a DataFrame with edge, node, and optional weight columns to formatted
+         coefficients and indices.
+
+         Parameters
+         ----------
+         data : pd.DataFrame
+             DataFrame where the first two columns represent edges and nodes, and an
+             optional 'weight' column specifies weights for each incidence.
+
+         Returns
+         -------
+         Tuple of numpy arrays
+             Coefficients based on the 'weight' column if present, and indices as a 2D
+             array for each hyperedge.
+         """
+         # Use the first two columns as edge and node labels
+         edge_column = data.columns[0]
+         node_column = data.columns[1]
+
+         # Group nodes by hyperedge (first column)
+         grouped_nodes = data.groupby(edge_column)[node_column].apply(list).to_dict()
+
+         # Take the first weight in each hyperedge group; default to 1.0 when no 'weight' column exists
+         if 'weight' in data.columns:
+             grouped_weights = data.groupby(edge_column)['weight'].first().fillna(1.0).tolist()
+         else:
+             grouped_weights = [1.0] * len(grouped_nodes)
+
+         # Map each node label to a unique, 1-based index
+         unique_nodes = sorted(set(node for nodes in grouped_nodes.values() for node in nodes))
+         node_to_index = {node: idx + 1 for idx, node in enumerate(unique_nodes)}  # 1-based indexing
+
+         indices = []
+         order = max(len(nodes) for nodes in grouped_nodes.values())
+         for nodes in grouped_nodes.values():
+             # Convert each node in the hyperedge to its unique index and sort
+             term_indices = sorted(node_to_index[node] for node in nodes)
+             indices.append([0] * (order - len(nodes)) + term_indices)  # Left-pad shorter terms with 0 per the index format
+
+         # Sort the terms and keep the weights aligned with their terms
+         grouped_weights = [w for _, w in sorted(zip(indices, grouped_weights), key=lambda pair: pair[0])]
+         indices = sorted(indices)
+
+         coefficients = np.array(grouped_weights)  # Use the per-hyperedge weights as coefficients
+         return coefficients, np.array(indices)
+
+     @property
+     def H(self) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Retrieves the Hamiltonian representation as polynomial coefficients and indices.
+
+         Returns
+         -------
+         Tuple of numpy arrays
+             Coefficients and indices for polynomial terms in the Hamiltonian.
+         """
+         return self.coefficients, self.indices
+
+     def evaluateObjective(self, solution: np.ndarray) -> float:
+         """
+         Evaluate the polynomial at `solution`.
+
+         :solution: 1D numpy array with the same length as the number of variables
+
+         Returns a floating point value.
+         """
+         value = 0
+         coefficients, indices = self.H
+         for index, coefficient in zip(indices, coefficients):
+             term = coefficient
+             for i in index:
+                 # Index 0 is padding, so it does not contribute a variable factor
+                 if i > 0:
+                     term *= solution[i - 1]
+             value += term
+         return value
+
+     @property
+     def polynomial(self) -> Polynomial:
+         """
+         Retrieves the polynomial operator representation of the hypergraph model.
+
+         Returns
+         -------
+         Polynomial
+             Polynomial operator representing terms in the hypergraph model.
+         """
+         return Polynomial(list(self.H[0]), list(self.H[1]))
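
For orientation, a minimal usage sketch for HypergraphModel (illustrative, not part of the packaged diff; the `edge` and `node` column names are arbitrary, since the model only uses the first two DataFrame columns):

    import numpy as np
    import pandas as pd
    from eqc_models.graph.hypergraph import HypergraphModel

    # Incidence format: each row is one (hyperedge, node) pair, with an
    # optional per-incidence 'weight' column
    df = pd.DataFrame({
        "edge": ["e1", "e1", "e1", "e2", "e2"],
        "node": ["A", "B", "C", "A", "D"],
        "weight": [2.0, 2.0, 2.0, 0.5, 0.5],
    })

    model = HypergraphModel(df)
    coefficients, indices = model.H  # per-hyperedge weights, 1-based zero-padded indices
    # With x_A = x_B = x_C = 1 and x_D = 0, only the 2.0 * x_A * x_B * x_C term survives
    value = model.evaluateObjective(np.array([1, 1, 1, 0]))  # expected: 2.0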
eqc_models-0.9.8.data/platlib/eqc_models/graph/maxcut.py
@@ -0,0 +1,155 @@
+ # (C) Quantum Computing Inc., 2024.
+ import networkx as nx
+ import numpy as np
+ from .base import TwoPartitionModel
+
+
+ class MaxCutModel(TwoPartitionModel):
+
+     def decode(self, solution: np.ndarray) -> np.ndarray:
+         """ Override the default decoding to use the max cut metric to determine a solution """
+
+         Gprime, solution = determine_solution(self.G, solution)
+         return solution
+
+     @property
+     def J(self) -> np.ndarray:
+         return self.quad_objective
+
+     @property
+     def C(self) -> np.ndarray:
+         return self.linear_objective
+
+     @property
+     def H(self):
+         return self.linear_objective, self.quad_objective
+
+     def partition(self, solution):
+         """ Return a dictionary with the partition number of each node """
+
+         partition_num = {}
+         for i, u in enumerate(self.node_map):
+             if solution[i] == 0:
+                 partition_num[u] = 1
+             else:
+                 partition_num[u] = 2
+         return partition_num
+
+     def getCutSize(self, partition):
+         cut_size = 0
+         for u, v in self.G.edges:
+             if partition[u] != partition[v]:
+                 cut_size += 1
+         return cut_size
+
+     def costFunction(self):
+         """
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         :C: linear operator (vector array of coefficients) for the cost function
+         :J: quadratic operator (N by N matrix array of coefficients) for the cost function
+         """
+         G = self.G
+         self.node_map = list(G.nodes)
+         variables = self.variables
+         n = len(variables)
+         self.upper_bound = np.ones((n,))
+
+         J = np.zeros((n, n), dtype=np.float32)
+         h = np.zeros((n, 1), dtype=np.float32)
+         for u, v in G.edges:
+             # Map node labels to variable positions so non-integer labels are handled
+             i = self.node_map.index(u)
+             j = self.node_map.index(v)
+             J[i, j] += 1
+             J[j, i] += 1
+             h[i, 0] -= 1
+             h[j, 0] -= 1
+         return h, J
+
+
+ def get_graph(n, d):
+     """ Produce a repeatable graph with parameters n and d """
+
+     seed = n * d
+     return nx.random_graphs.random_regular_graph(d, n, seed)
+
+
+ def get_partition_graph(G, solution):
+     """
+     Build the partitioned graph, counting cut size.
+
+     :parameters: G : nx.Graph, solution : np.ndarray
+     :returns: nx.DiGraph, int
+     """
+
+     cut_size = 0
+     Gprime = nx.DiGraph()
+     Gprime.add_nodes_from(G.nodes)
+     for i, j in G.edges:
+         if solution[i] != solution[j]:
+             cut_size += 1
+         else:
+             Gprime.add_edge(i, j)
+     return Gprime, cut_size
+
+
+ def determine_solution(G, solution):
+     """
+     Use a simple bisection method to determine the binary solution, using
+     the cut size as the metric.
+
+     Returns the partitioned graph and solution.
+
+     :parameters: G : nx.Graph, solution : np.ndarray
+     :returns: nx.DiGraph, np.ndarray
+     """
+     solution = np.array(solution)
+     test_vals = np.copy(solution)
+     test_vals.sort()
+     lower = 0
+     upper = solution.shape[0] - 1
+     best_cut_size = 0
+     best_graph = G
+     best_solution = None
+     while upper > lower:
+         # Threshold at the middle-ranked value and keep the half that improves the cut
+         middle = (upper + lower) // 2
+         threshold = test_vals[middle]
+         test_solution = (solution >= threshold).astype(np.int32)
+         Gprime, cut_size = get_partition_graph(G, test_solution)
+         if cut_size > best_cut_size:
+             best_cut_size = cut_size
+             lower = middle
+             best_solution = test_solution
+             best_graph = Gprime
+         else:
+             upper = middle
+     return best_graph, best_solution
+
+ def get_maxcut_H(G, t):
+     """
+     Return a Hamiltonian representing the maximum cut problem, scaling the problem by `t`.
+     Automatically adds a slack qudit.
+     """
+     n = len(G.nodes)
+     J = np.zeros(shape=(n + 1, n + 1), dtype=np.float32)
+     h = np.zeros(shape=(n + 1, 1), dtype=np.float32)
+     for u, v in G.edges:
+         J[u, v] += 1
+         J[v, u] += 1
+         J[u, u] = 1
+         J[v, v] = 1
+         h[u] -= 1
+         h[v] -= 1
+     J *= 1 / t**2
+     h *= 1 / t
+     H = np.hstack([h, J])
+     return H
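
As a quick check of the module-level helpers above, this sketch (illustrative only) thresholds a continuous solver-style output into a binary cut on a small repeatable graph:

    import numpy as np
    from eqc_models.graph.maxcut import get_graph, get_partition_graph, determine_solution

    G = get_graph(8, 3)  # repeatable 3-regular graph on 8 nodes
    raw = np.random.RandomState(0).rand(8)  # stand-in for a solver's continuous output

    # Bisect over sorted threshold candidates, keeping the largest cut found
    Gprime, solution = determine_solution(G, raw)
    _, cut_size = get_partition_graph(G, solution)
    print(cut_size, solution)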
eqc_models-0.9.8.data/platlib/eqc_models/graph/maxkcut.py
@@ -0,0 +1,184 @@
+ # (C) Quantum Computing Inc., 2024.
+ import numpy as np
+ import networkx as nx
+ from .base import NodeModel
+
+
+ class MaxKCutModel(NodeModel):
+
+     def __init__(self, G : nx.Graph, k : int):
+         super(MaxKCutModel, self).__init__(G)
+         self.k = k
+         self.lhs = None
+         self.rhs = None
+         self._objective = None
+         self._J = None
+         self._C = None
+
+     def decode(self, solution: np.ndarray) -> np.ndarray:
+         """ Override the default decoding to use the max cut metric to determine a solution """
+
+         # only one partition per node can be selected;
+         # rather than the same cutoff per node, use the max value per partition
+         decoded_solution = np.zeros_like(solution, dtype=np.int32)
+         k = self.k
+         for i, u in enumerate(self.variables):
+             idx = slice(k * i, k * (i + 1))
+             spins = solution[idx]
+             mx = np.max(spins)
+             for j in range(k):
+                 if spins[j] == mx:
+                     decoded_solution[k * i + j] = 1
+                     break
+         return decoded_solution
+
+     def partition(self, solution):
+         """ Return a dictionary with the partition number of each node """
+         k = self.k
+         partition_num = {}
+         for i, u in enumerate(self.variables):
+             for j in range(k):
+                 if solution[i * k + j] == 1:
+                     partition_num[u] = j + 1
+         return partition_num
+
+     def getCutSize(self, partition):
+         cut_size = 0
+         for u, v in self.G.edges:
+             if partition[u] != partition[v]:
+                 cut_size += 1
+         return cut_size
+
+     def _build_objective(self):
+
+         node_map = self.variables
+         G = self.G
+         m = len(G.nodes)
+         n = self.k * m
+         # construct the quadratic portion of the objective;
+         # the linear portion is 0
+         objective = np.zeros((n, n), dtype=np.float32)
+         # increment the joint variable terms indicating the nodes are in different sets
+         pairs = [(i, j) for i in range(self.k) for j in range(self.k) if i != j]
+         for u, v in G.edges:
+             i = node_map.index(u)
+             j = node_map.index(v)
+             ibase = i * self.k
+             jbase = j * self.k
+             for incr1, incr2 in pairs:
+                 idx1 = ibase + incr1
+                 idx2 = jbase + incr2
+                 objective[idx1, idx2] += -1
+         self._objective = (np.zeros((n, 1)), objective)
+
+     def _build_constraints(self):
+
+         node_map = self.variables
+         G = self.G
+         m = len(G.nodes)
+         n = self.k * m
+
+         # build the constraints: each node is assigned to exactly one partition
+         A = np.zeros((m, n))
+         b = np.ones((m,))
+         for u in G.nodes:
+             i = node_map.index(u)
+             ibase = i * self.k
+             A[i, ibase:ibase + self.k] = 1
+         self.lhs = A
+         self.rhs = b
+
+     def build(self, multiplier=None):
+         """ Create the constraints, objective, and Hamiltonian """
+
+         # there are k * m variables in this problem, where m is the number of nodes in the graph
+         G = self.G
+         m = len(G.nodes)
+         n = self.k * m
+         self.upper_bound = np.ones((n,))
+
+         self._build_objective()
+         if multiplier is None:
+             multiplier = np.max(np.abs(self._objective[1]))
+         self._build_constraints()
+
+         self._C, self._J = self.buildH(multiplier)
+         self.sum_constraint = m
+
+     def buildH(self, multiplier):
+         """ Combine the objective and penalties using the multiplier """
+
+         objC, objJ = self.objective
+         lhs, rhs = self.constraints
+         # penalty terms from ||lhs @ x - rhs||^2: quadratic, linear, and constant parts;
+         # the constant offset does not affect the argmin
+         Pq = lhs.T @ lhs
+         Pl = -2 * rhs.T @ lhs
+         offset = rhs.T @ rhs
+         n = self.n
+         J = np.zeros((n, n), np.float32)
+         C = np.zeros([n, 1], np.float32)
+         C += objC
+         J[:, :] += objJ
+         C += multiplier * Pl.reshape((n, 1))
+         J[:, :] += multiplier * Pq
+         return C, J
+
+     @property
+     def constraints(self):
+         """ Return LHS, RHS in numpy matrix format """
+         if self.rhs is None:
+             self.build()
+         return self.lhs, self.rhs
+
+     @property
+     def objective(self):
+         """ Return the objective as a pair (C, J) of linear and quadratic parts """
+
+         if self._objective is None:
+             self.build()
+         return self._objective
+
+     @property
+     def H(self):
+         """ Return the Hamiltonian as parts C, J """
+
+         if self._C is None:
+             self.build()
+         return self._C, self._J
+
+ class WeightedMaxKCutModel(MaxKCutModel):
+
+     def __init__(self, G: nx.Graph, k: int, weight_label : str = "weight"):
+         super().__init__(G, k)
+
+         self.weight_label = weight_label
+
+     def _build_objective(self):
+
+         node_map = self.variables
+         G = self.G
+         m = len(G.nodes)
+         n = self.k * m
+         # construct the quadratic portion of the objective;
+         # the linear portion is 0
+         objective = np.zeros((n, n), dtype=np.float32)
+         # increment the joint variable terms indicating the nodes are in different sets
+         pairs = [(i, j) for i in range(self.k) for j in range(self.k) if i != j]
+         for u, v in G.edges:
+             i = node_map.index(u)
+             j = node_map.index(v)
+             ibase = i * self.k
+             jbase = j * self.k
+             for incr1, incr2 in pairs:
+                 idx1 = ibase + incr1
+                 idx2 = jbase + incr2
+                 objective[idx1, idx2] += G[u][v][self.weight_label]
+         self._objective = (np.zeros((n, 1)), objective)
+
+     def getCutSize(self, partition):
+         cut_size = 0
+         for u, v in self.G.edges:
+             if partition[u] != partition[v]:
+                 cut_size += self.G[u][v][self.weight_label]
+         return cut_size
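
The penalty construction in buildH is the standard squared-residual form: for constraints A x = b it adds multiplier * ||A x - b||^2, expanded into the quadratic part A^T A, the linear part -2 b^T A, and the constant b^T b. A standalone sketch of the one-hot membership constraints built by _build_constraints (illustrative, independent of the NodeModel base class):

    import numpy as np

    m, k = 4, 3      # m nodes, k partitions -> n = k * m selection variables
    n = k * m

    # A x = b: each node selects exactly one partition
    A = np.zeros((m, n))
    b = np.ones((m,))
    for i in range(m):
        A[i, i * k:(i + 1) * k] = 1

    # A feasible one-hot assignment has zero residual, hence zero penalty
    x = np.zeros(n)
    x[[0 * k + 1, 1 * k + 0, 2 * k + 2, 3 * k + 1]] = 1
    residual = A @ x - b
    penalty = residual @ residual  # equals x @ (A.T @ A) @ x - 2 * (b @ A) @ x + b @ b
    print(penalty)  # 0.0 for a feasible assignment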
eqc_models-0.9.8.data/platlib/eqc_models/ml/__init__.py
@@ -0,0 +1,15 @@
+ # (C) Quantum Computing Inc., 2024.
+ from .classifierqboost import QBoostClassifier
+ from .classifierqsvm import QSVMClassifier
+ from .decomposition import PCA
+ from .forecast import ReservoirForecastModel
+ # The remaining names in __all__ must be imported as well; the module
+ # locations below are inferred from this package's file layout
+ from .regressor import LinearRegression
+ from .clustering import GraphClustering, Clustering
+
+ __all__ = [
+     "QBoostClassifier",
+     "QSVMClassifier",
+     "PCA",
+     "ReservoirForecastModel",
+     "LinearRegression",
+     "GraphClustering",
+     "Clustering",
+ ]
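
With the public names wired up, consumers can import directly from the subpackage (a sketch, assuming the wheel is installed):

    from eqc_models.ml import QBoostClassifier, PCA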