eqc-models 0.10.3__py3-none-any.whl → 0.11.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. eqc_models-0.11.1.data/platlib/eqc_models/assignment/__init__.py +6 -0
  2. eqc_models-0.11.1.data/platlib/eqc_models/assignment/resource.py +165 -0
  3. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/base/__init__.py +3 -1
  4. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/base/polyeval.c +123 -123
  5. eqc_models-0.11.1.data/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
  6. eqc_models-0.11.1.data/platlib/eqc_models/base/results.py +166 -0
  7. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/classifierqsvm.py +197 -21
  8. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/decomposition.py +39 -16
  9. eqc_models-0.11.1.data/platlib/eqc_models/process/base.py +13 -0
  10. eqc_models-0.11.1.data/platlib/eqc_models/process/mpc.py +17 -0
  11. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/solvers/qciclient.py +6 -3
  12. {eqc_models-0.10.3.dist-info → eqc_models-0.11.1.dist-info}/METADATA +1 -1
  13. eqc_models-0.11.1.dist-info/RECORD +63 -0
  14. {eqc_models-0.10.3.dist-info → eqc_models-0.11.1.dist-info}/WHEEL +1 -1
  15. eqc_models-0.10.3.data/platlib/eqc_models/assignment/__init__.py +0 -5
  16. eqc_models-0.10.3.data/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
  17. eqc_models-0.10.3.dist-info/RECORD +0 -59
  18. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/compile_extensions.py +0 -0
  19. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/__init__.py +0 -0
  20. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/algorithms/__init__.py +0 -0
  21. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/algorithms/base.py +0 -0
  22. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/algorithms/penaltymultiplier.py +0 -0
  23. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/allocation/__init__.py +0 -0
  24. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/allocation/allocation.py +0 -0
  25. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/allocation/portbase.py +0 -0
  26. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/allocation/portmomentum.py +0 -0
  27. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/assignment/qap.py +0 -0
  28. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/assignment/setpartition.py +0 -0
  29. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/base/base.py +0 -0
  30. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/base/constraints.py +0 -0
  31. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/base/operators.py +0 -0
  32. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/base/polyeval.pyx +0 -0
  33. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/base/polynomial.py +0 -0
  34. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/base/quadratic.py +0 -0
  35. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/combinatorics/__init__.py +0 -0
  36. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/combinatorics/setcover.py +0 -0
  37. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/combinatorics/setpartition.py +0 -0
  38. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/decoding.py +0 -0
  39. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/graph/__init__.py +0 -0
  40. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/graph/base.py +0 -0
  41. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/graph/hypergraph.py +0 -0
  42. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/graph/maxcut.py +0 -0
  43. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/graph/maxkcut.py +0 -0
  44. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/graph/partition.py +0 -0
  45. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/__init__.py +0 -0
  46. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/classifierbase.py +0 -0
  47. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/classifierqboost.py +0 -0
  48. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/clustering.py +0 -0
  49. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/clusteringbase.py +0 -0
  50. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/cvqboost_hamiltonian.pyx +0 -0
  51. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/cvqboost_hamiltonian_c_func.c +0 -0
  52. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/cvqboost_hamiltonian_c_func.h +0 -0
  53. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/forecast.py +0 -0
  54. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/forecastbase.py +0 -0
  55. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/regressor.py +0 -0
  56. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/regressorbase.py +0 -0
  57. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/ml/reservoir.py +0 -0
  58. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/sequence/__init__.py +0 -0
  59. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/sequence/tsp.py +0 -0
  60. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/solvers/__init__.py +0 -0
  61. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/utilities/__init__.py +0 -0
  62. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/utilities/fileio.py +0 -0
  63. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/utilities/polynomial.py +0 -0
  64. {eqc_models-0.10.3.data → eqc_models-0.11.1.data}/platlib/eqc_models/utilities/qplib.py +0 -0
  65. {eqc_models-0.10.3.dist-info → eqc_models-0.11.1.dist-info}/licenses/LICENSE.txt +0 -0
  66. {eqc_models-0.10.3.dist-info → eqc_models-0.11.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,166 @@
import dataclasses
import warnings

import numpy as np


@dataclasses.dataclass
class SolutionResults:
    """
    The class is meant to provide a uniform interface to results, no matter
    the method of running the job. If available, the metrics are reported
    in nanoseconds.

    Properties
    ------------

    solutions : np.ndarray
        2-d array of solution vectors

    energies : np.ndarray
        1-d array of energies computed from the device for each sample

    counts : np.ndarray
        1-d array of counts the particular sample occurred during sampling

    objectives : np.ndarray
        1-d array of objective values. Is None if the model does not provide
        a separate objective function

    run_time : np.ndarray
        1-d array of runtimes reported by the device.

    preprocessing_time : int
        Single value for time spent preprocessing before sampling occurs.

    postprocessing_time : np.ndarray
        1-d array of time spent post-processing samples.

    penalties : np.ndarray
        1-d array of penalty values for each sample. Is None if the model does
        not have constraints.

    device : str
        String that represents the device used to solve the model.

    time_units : str
        String indicator of the unit of time reported in the metrics. Only
        ns is supported at this time.

    """

    solutions: np.ndarray
    energies: np.ndarray
    counts: np.ndarray
    objectives: np.ndarray
    run_time: np.ndarray
    preprocessing_time: int
    postprocessing_time: np.ndarray
    penalties: np.ndarray = None
    device: str = None
    time_units: str = "ns"

    @property
    def device_time(self):
        """
        Total device usage computed from preprocessing, runtime and
        postprocessing time. Returns None when no runtime data is
        available.

        """
        # BUGFIX: the original used ``if self.run_time:``, which raises
        # ValueError ("truth value of an array ... is ambiguous") when
        # run_time is a multi-element ndarray; test for emptiness explicitly.
        if self.run_time is None or len(self.run_time) == 0:
            return None
        pre = self.preprocessing_time
        runtime = np.sum(self.run_time)
        post = np.sum(self.postprocessing_time)
        return pre + runtime + post

    @property
    def total_samples(self):
        """Total number of samples taken, summed over all distinct solutions."""
        return np.sum(self.counts)

    @property
    def best_energy(self):
        """Lowest energy observed across all samples."""
        return np.min(self.energies)

    @classmethod
    def determine_device_type(cls, device_config):
        """
        Use the device config object from a cloud response
        to get the device info. It will have a device and job type
        identifiers in it.

        """
        devices = [k for k in device_config.keys()]
        # only one device type is supported at a time
        return devices[0]

    @classmethod
    def from_cloud_response(cls, model, response, solver):
        """ Fill in the details from the cloud """

        solutions = np.array(response["results"]["solutions"])
        if model.machine_slacks > 0:
            # drop the slack variables appended to each solution vector
            solutions = solutions[:, :-model.machine_slacks]
        energies = np.array(response["results"]["energies"])
        # interrogate to determine the device type
        try:
            device_type = cls.determine_device_type(
                response["job_info"]["job_submission"]["device_config"])
        except KeyError:
            # BUGFIX: report the malformed response through warnings instead
            # of a bare debug print, then re-raise for the caller
            warnings.warn(
                f"Unexpected response keys: {list(response.keys())}")
            raise
        if "dirac-1" in device_type:
            # decode the qubo: map each group of bits back to an integer value
            new_solutions = []
            for solution in solutions:
                solution = np.array(solution)
                # build an operator to map the bit vector to scalar
                base_count = np.floor(np.log2(model.upper_bound)) + 1
                assert np.sum(base_count) == solution.shape[0], "Incorrect solution-upper bound match"
                m = model.upper_bound.shape[0]
                n = solution.shape[0]
                D = np.zeros((m, n), dtype=np.int32)
                j = 0
                for i in range(m):
                    k = int(base_count[i])
                    # powers of two select the bits belonging to variable i
                    D[i, j:j + k] = 2 ** np.arange(k)
                    j += k
                solution = D @ solution
                new_solutions.append(solution)
            solutions = np.array(new_solutions)
        if hasattr(model, "evaluateObjective"):
            objectives = np.zeros((solutions.shape[0],), dtype=np.float32)
            for i in range(solutions.shape[0]):
                try:
                    objective = model.evaluateObjective(solutions[i])
                except NotImplementedError:
                    # the model advertises the method but does not support it
                    warnings.warn(f"Cannot set objective value in results for {model.__class__}")
                    objectives = None
                    break
                objectives[i] = objective
        else:
            objectives = None
        if hasattr(model, "evaluatePenalties"):
            penalties = np.zeros((solutions.shape[0],), dtype=np.float32)
            for i in range(solutions.shape[0]):
                penalties[i] = model.evaluatePenalties(solutions[i]) + model.offset
        else:
            penalties = None
        counts = np.array(response["results"]["counts"])
        job_id = response["job_info"]["job_id"]
        try:
            metrics = solver.client.get_job_metrics(job_id=job_id)
            metrics = metrics["job_metrics"]
            time_ns = metrics["time_ns"]
            device = time_ns["device"][device_type]
            runtime = device["samples"]["runtime"]
            post = device["samples"].get("postprocessing_time", [0 for t in runtime])
            pre = device["samples"].get("preprocessing_time", 0)
        except KeyError:
            # metrics are best-effort; fall back to empty timing data
            runtime = []
            post = []
            pre = None
        results = SolutionResults(solutions, energies, counts, objectives,
                                  runtime, pre, post, penalties=penalties,
                                  device=device_type, time_units="ns")

        return results
@@ -8,28 +8,204 @@ import json
8
8
  import warnings
9
9
  from functools import wraps
10
10
  import numpy as np
11
+ from sklearn.preprocessing import MinMaxScaler
11
12
 
12
13
  from eqc_models.ml.classifierbase import ClassifierBase
13
14
 
14
15
 
15
16
class QSVMClassifier(ClassifierBase):
    """An implementation of QSVM classifier that uses QCi's Dirac-3.

    Parameters
    ----------

    relaxation_schedule: Relaxation schedule used by Dirac-3; default:
    1.

    num_samples: Number of samples used by Dirac-3; default: 1.

    lambda_coef: The penalty multiplier; default: 1.0.

    Examples
    -----------

    >>> from sklearn import datasets
    >>> from sklearn.preprocessing import MinMaxScaler
    >>> from sklearn.model_selection import train_test_split
    >>> iris = datasets.load_iris()
    >>> X = iris.data
    >>> y = iris.target
    >>> scaler = MinMaxScaler()
    >>> X = scaler.fit_transform(X)
    >>> for i in range(len(y)):
    ...     if y[i] == 0:
    ...         y[i] = -1
    ...     elif y[i] == 2:
    ...         y[i] = 1
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X,
    ...     y,
    ...     test_size=0.2,
    ...     random_state=42,
    ... )
    >>> from eqc_models.ml.classifierqsvm import QSVMClassifier
    >>> obj = QSVMClassifier(
    ...     relaxation_schedule=2,
    ...     num_samples=1,
    ... )
    >>> from contextlib import redirect_stdout
    >>> import io
    >>> f = io.StringIO()
    >>> with redirect_stdout(f):
    ...     obj = obj.fit(X_train, y_train)
    ...     y_train_prd = obj.predict(X_train)
    ...     y_test_prd = obj.predict(X_test)

    """

    def __init__(
        self,
        relaxation_schedule=1,
        num_samples=1,
        lambda_coef=1.0,
    ):
        # BUGFIX: the original called ``super(QSVMClassifier).__init__()``,
        # which builds an *unbound* super object and never invokes the parent
        # initializer; the zero-argument form does the right thing.
        super().__init__()

        self.relaxation_schedule = relaxation_schedule
        self.num_samples = num_samples
        self.lambda_coef = lambda_coef
        # scale features into [-1, 1] before building the Hamiltonian
        self.fea_scaler = MinMaxScaler(feature_range=(-1, 1))

    def fit(self, X, y):
        """
        Build a QSVM classifier from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples.

        y : array-like of shape (n_samples,)
            The target values.

        Returns
        -------
        Response of Dirac-3 in JSON format.
        """

        assert X.shape[0] == y.shape[0], "Inconsistent sizes!"

        assert set(y) == {-1, 1}, "Target values should be in {-1, 1}"

        X = self.fea_scaler.fit_transform(X)

        J, C, sum_constraint = self.get_hamiltonian(X, y)

        assert J.shape[0] == J.shape[1], "Inconsistent hamiltonian size!"
        assert J.shape[0] == C.shape[0], "Inconsistent hamiltonian size!"

        self.set_model(J, C, sum_constraint)

        sol, response = self.solve()

        assert len(sol) == C.shape[0], "Inconsistent solution size!"

        self.params = self.convert_sol_to_params(sol)

        self.X_train = X
        self.y_train = y

        return response

    def predict_raw(self, X: np.array):
        """
        Compute raw (un-thresholded) decision values for X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)

        Returns
        -------
        y : ndarray of shape (n_samples,)
            The raw decision values.
        """
        n_records = X.shape[0]
        X = self.fea_scaler.transform(X)
        # append a constant column so the bias term is folded into params
        X_tilde = np.concatenate((X, np.ones((n_records, 1))), axis=1)

        y = np.einsum("i,ki->k", self.params, X_tilde)

        return y

    def predict(self, X: np.array):
        """
        Predict classes for X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)

        Returns
        -------
        y : ndarray of shape (n_samples,)
            The predicted classes.
        """

        y = self.predict_raw(X)
        y = np.sign(y)

        return y

    def get_hamiltonian(
        self,
        X: np.array,
        y: np.array,
    ):
        """
        Construct the quadratic (J) and linear (C) Hamiltonian terms for
        the primal QSVM formulation, plus the sum-constraint value.
        """
        n_records = X.shape[0]
        n_dims = X.shape[1]

        # append a constant column so the bias term is part of the model
        X_tilde = np.concatenate((X, np.ones((n_records, 1))), axis=1)

        # NOTE: dead ``np.zeros`` initializations of J and C removed; both
        # were immediately overwritten by the einsum results below.
        J = self.lambda_coef * np.einsum(
            "i,ik,il->kl", y**2, X_tilde, X_tilde
        )

        # regularize the weight diagonal; the bias entry (last index) is
        # left unregularized -- presumably intentional, TODO confirm
        for k in range(n_dims):
            J[k][k] += 0.5

        C = -2.0 * self.lambda_coef * np.einsum("i,ik->k", y, X_tilde)

        # symmetrize J and shape C as a column vector
        J = 0.5 * (J + J.transpose())
        C = C.reshape((1 + n_dims, 1))

        return J, C, 1.0

    def convert_sol_to_params(self, sol):
        """Convert the solver's solution vector to model parameters."""
        return np.array(sol)
191
+ class QSVMClassifierDual(ClassifierBase):
192
+ """An implementation of dual QSVM classifier that uses QCi's Dirac-3.
193
+
18
194
  Parameters
19
195
  ----------
20
-
196
+
21
197
  relaxation_schedule: Relaxation schedule used by Dirac-3; default:
22
198
  2.
23
-
199
+
24
200
  num_samples: Number of samples used by Dirac-3; default: 1.
25
-
201
+
26
202
  upper_limit: Coefficient upper limit; a regularization parameter;
27
203
  default: 1.0.
28
-
204
+
29
205
  gamma: Gaussian kernel parameter; default: 1.0.
30
-
206
+
31
207
  eta: A penalty multiplier; default: 1.0.
32
-
208
+
33
209
  zeta: A penalty multiplier; default: 1.0.
34
210
 
35
211
  Examples
@@ -54,8 +230,8 @@ class QSVMClassifier(ClassifierBase):
54
230
  ... test_size=0.2,
55
231
  ... random_state=42,
56
232
  ... )
57
- >>> from eqc_models.ml.classifierqsvm import QSVMClassifier
58
- >>> obj = QSVMClassifier(
233
+ >>> from eqc_models.ml.classifierqsvm import QSVMClassifierDual
234
+ >>> obj = QSVMClassifierDual(
59
235
  ... relaxation_schedule=2,
60
236
  ... num_samples=1,
61
237
  ... upper_limit=1.0,
@@ -70,9 +246,9 @@ class QSVMClassifier(ClassifierBase):
70
246
  ... obj = obj.fit(X_train, y_train)
71
247
  ... y_train_prd = obj.predict(X_train)
72
248
  ... y_test_prd = obj.predict(X_test)
73
-
249
+
74
250
  """
75
-
251
+
76
252
  def __init__(
77
253
  self,
78
254
  relaxation_schedule=2,
@@ -81,8 +257,8 @@ class QSVMClassifier(ClassifierBase):
81
257
  gamma=1.0,
82
258
  eta=1.0,
83
259
  zeta=1.0,
84
- ):
85
- super(QSVMClassifier).__init__()
260
+ ):
261
+ super(QSVMClassifierDual).__init__()
86
262
 
87
263
  self.relaxation_schedule = relaxation_schedule
88
264
  self.num_samples = num_samples
@@ -97,20 +273,20 @@ class QSVMClassifier(ClassifierBase):
97
273
  def fit(self, X, y):
98
274
  """
99
275
  Build a QSVM classifier from the training set (X, y).
100
-
276
+
101
277
  Parameters
102
278
  ----------
103
279
  X : {array-like, sparse matrix} of shape (n_samples, n_features)
104
- The training input samples.
105
-
280
+ The training input samples.
281
+
106
282
  y : array-like of shape (n_samples,)
107
283
  The target values.
108
-
284
+
109
285
  Returns
110
286
  -------
111
287
  Response of Dirac-3 in JSON format.
112
288
  """
113
-
289
+
114
290
  assert X.shape[0] == y.shape[0], "Inconsistent sizes!"
115
291
 
116
292
  assert set(y) == {-1, 1}, "Target values should be in {-1, 1}"
@@ -144,17 +320,17 @@ class QSVMClassifier(ClassifierBase):
144
320
  def predict(self, X: np.array):
145
321
  """
146
322
  Predict classes for X.
147
-
323
+
148
324
  Parameters
149
325
  ----------
150
326
  X : {array-like, sparse matrix} of shape (n_samples, n_features)
151
-
327
+
152
328
  Returns
153
329
  -------
154
330
  y : ndarray of shape (n_samples,)
155
331
  The predicted classes.
156
332
  """
157
-
333
+
158
334
  assert self.X_train is not None, "Model not trained yet!"
159
335
  assert self.y_train is not None, "Model not trained yet!"
160
336
 
@@ -21,17 +21,17 @@ from eqc_models.solvers.qciclient import (
21
21
 
22
22
  class DecompBase(QuadraticModel):
23
23
  """An Base class for decomposition algorithms.
24
-
24
+
25
25
  Parameters
26
26
  ----------
27
-
27
+
28
28
  relaxation_schedule: Relaxation schedule used by Dirac-3; default:
29
29
  2.
30
-
30
+
31
31
  num_samples: Number of samples used by Dirac-3; default: 1.
32
32
 
33
33
  """
34
-
34
+
35
35
  def __init__(
36
36
  self,
37
37
  relaxation_schedule=2,
@@ -163,8 +163,7 @@ class DecompBase(QuadraticModel):
163
163
 
164
164
 
165
165
  class PCA(DecompBase):
166
- """
167
- An implementation of Principal component analysis (PCA) that
166
+ """An implementation of Principal component analysis (PCA) that
168
167
  uses QCi's Dirac-3.
169
168
 
170
169
  Linear dimensionality reduction using Singular Value
@@ -173,18 +172,22 @@ class PCA(DecompBase):
173
172
 
174
173
  Parameters
175
174
  ----------
176
-
175
+
177
176
  n_components: Number of components to keep; if n_components is not
178
177
  set all components are kept; default: None.
179
-
178
+
180
179
  relaxation_schedule: Relaxation schedule used by Dirac-3; default:
181
180
  2.
182
-
181
+
183
182
  num_samples: Number of samples used by Dirac-3; default: 1.
184
183
 
184
+ mode: Compute the largest or smallest principal components,
185
+ largest_components vs. smallest_components; default:
186
+ largest_components.
187
+
185
188
  Examples
186
189
  -----------
187
-
190
+
188
191
  >>> from sklearn import datasets
189
192
  >>> iris = datasets.load_iris()
190
193
  >>> X = iris.data
@@ -202,18 +205,25 @@ class PCA(DecompBase):
202
205
  ... num_samples=1,
203
206
  ... )
204
207
  ... X_pca = obj.fit_transform(X)
205
-
208
+
206
209
  """
207
-
210
+
208
211
  def __init__(
209
212
  self,
210
213
  n_components=None,
211
214
  relaxation_schedule=2,
212
215
  num_samples=1,
216
+ mode="largest_components",
213
217
  ):
214
218
  self.n_components = n_components
215
219
  self.relaxation_schedule = relaxation_schedule
216
220
  self.num_samples = num_samples
221
+
222
+ assert mode in ["largest_components", "smallest_components"], (
223
+ "Invalid value of mode <%s>" % mode
224
+ )
225
+
226
+ self.mode = mode
217
227
  self.X = None
218
228
  self.X_pca = None
219
229
 
@@ -224,12 +234,20 @@ class PCA(DecompBase):
224
234
  num_records = X.shape[0]
225
235
  num_features = X.shape[1]
226
236
 
227
- J = -np.matmul(X.transpose(), X)
237
+ J = np.matmul(X.transpose(), X)
228
238
 
229
239
  assert J.shape[0] == num_features
230
240
  assert J.shape[1] == num_features
231
241
 
232
- C = np.zeros((num_features, 1))
242
+ C = -np.sum(J, axis=1)
243
+
244
+ assert C.shape[0] == num_features
245
+
246
+ C = C.reshape((num_features, 1))
247
+
248
+ if self.mode == "largest_components":
249
+ J = -J
250
+ C = -C
233
251
 
234
252
  return J, C
235
253
 
@@ -239,12 +257,17 @@ class PCA(DecompBase):
239
257
  assert J.shape[0] == J.shape[1], "Inconsistent hamiltonian size!"
240
258
  assert J.shape[0] == C.shape[0], "Inconsistent hamiltonian size!"
241
259
 
260
+ sum_constraint = 0.5 * (1.0 + C.shape[0])
261
+
242
262
  self._set_model(J, C, 1.0)
243
263
 
244
264
  sol, response = self._solve()
245
265
 
246
266
  assert len(sol) == C.shape[0], "Inconsistent solution size!"
247
267
 
268
+ sol = np.array(sol)
269
+ sol = 2.0 * sol - 1.0
270
+
248
271
  fct = np.linalg.norm(sol)
249
272
  if fct > 0:
250
273
  fct = 1.0 / fct
@@ -282,7 +305,7 @@ class PCA(DecompBase):
282
305
 
283
306
  Returns
284
307
  -------
285
- responses.
308
+ responses.
286
309
  A dirct containing Dirac responses.
287
310
  """
288
311
 
@@ -310,7 +333,7 @@ class PCA(DecompBase):
310
333
  assert X.shape == self.X.shape, "Inconsistent size!"
311
334
 
312
335
  resp_hash["component_%d_response" % (i + 1)] = resp
313
-
336
+
314
337
  self.X_pca = np.array(self.X_pca).transpose()
315
338
 
316
339
  assert self.X_pca.shape[0] == self.X.shape[0]
@@ -0,0 +1,13 @@
from typing import Tuple

import networkx as nx
import numpy as np

from eqc_models.base import ConstrainedPolynomialModel


class ProcessModel:
    """ Base class for process-graph optimization models.

    Parameters
    ----------
    G : nx.DiGraph
        Directed graph describing the process.
    """

    def __init__(self, G: nx.DiGraph):
        self.G = G

    @staticmethod
    def process_constraints(G: nx.DiGraph):
        """ Build process constraints from the graph """
        # BUGFIX: the stub silently returned None; make the missing
        # implementation explicit for subclasses.
        raise NotImplementedError("subclass must implement process_constraints")

    def constraints(self) -> Tuple[np.ndarray]:
        """ Return the constraint arrays for this process model """
        # BUGFIX: the original referenced ``Tuple`` and ``np`` without
        # importing them; imports added above. Subclasses must implement.
        raise NotImplementedError("subclass must implement constraints")
@@ -0,0 +1,17 @@
from typing import Tuple

import networkx as nx
import numpy as np

from eqc_models.process.base import ProcessModel


class ModelPredictiveControl(ProcessModel):
    """ Base class for implementing MPC optimization problems

    Parameters
    ----------
    G : nx.Graph
        Process graph for the control problem.
    T : int
        Number of time steps in the horizon; default: 1.
        NOTE(review): the original signature named this parameter ``V``
        but the body stored ``self.T = T``; renamed for consistency --
        confirm against callers.
    V_T : float
        Terminal value; default: 0.0.
    """

    def __init__(self, G: nx.Graph, T: int = 1, V_T: float = 0.0, *args, **kwargs):
        # BUGFIX: the original forwarded undefined ``*args``/``**kwargs`` and
        # referenced an undefined name ``T``; accept and forward them here.
        self.processArgs(*args, **kwargs)
        self.T = T
        self.V_T = V_T
        super(ModelPredictiveControl, self).__init__(G)

    def processArgs(self, *args, **kwargs):
        """ Provide a method to capture arguments necessary for configuring an instance """
        raise NotImplementedError("subclass must implement processArgs")

    def constraints(self) -> Tuple[np.ndarray]:
        """ Return the constraint arrays; subclasses must implement """
        # BUGFIX: the original method body was empty (a syntax error) and
        # ``nx``, ``np`` and ``Tuple`` were used without imports (added above).
        raise NotImplementedError("subclass must implement constraints")
@@ -519,7 +519,8 @@ class Dirac3CloudSolver(Dirac3Mixin, QciClientSolver):
519
519
  of photons that are present in a given quantum state.
520
520
  Modify this value to control the relaxation schedule more
521
521
  precisely than the four presets given in schedules 1
522
- through 4. Allowed values are decimals between 0.1 and 2.
522
+ through 4. Allowed values are decimals between 0.000133333333333 and
523
+ 0.001.
523
524
  quantum_fluctuation_coefficient: int
524
525
  an integer value which Sets the amount of loss introduced
525
526
  into the system for each loop during the measurement process.
@@ -622,7 +623,8 @@ class Dirac3IntegerCloudSolver(Dirac3Mixin, QciClientSolver):
622
623
  of photons that are present in a given quantum state.
623
624
  Modify this value to control the relaxation schedule more
624
625
  precisely than the four presets given in schedules 1
625
- through 4. Allowed values are decimals between 0.1 and 2.
626
+ through 4. Allowed values are decimals between 0.000133333333333
627
+ and 0.001.
626
628
  quantum_fluctuation_coefficient: int
627
629
  an integer value which Sets the amount of loss introduced
628
630
  into the system for each loop during the measurement process.
@@ -716,7 +718,8 @@ class Dirac3ContinuousCloudSolver(Dirac3Mixin, QciClientSolver):
716
718
  of photons that are present in a given quantum state.
717
719
  Modify this value to control the relaxation schedule more
718
720
  precisely than the four presets given in schedules 1
719
- through 4. Allowed values are decimals between 0.1 and 2.
721
+ through 4. Allowed values are decimals between 0.000133333333333
722
+ and 0.001.
720
723
  quantum_fluctuation_coefficient: int
721
724
  an integer value which Sets the amount of loss introduced
722
725
  into the system for each loop during the measurement process.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: eqc-models
3
- Version: 0.10.3
3
+ Version: 0.11.1
4
4
  Summary: Optimization and ML modeling package targeting EQC devices
5
5
  Author: Quantum Computing Inc.
6
6
  Author-email: support@quantumcomputinginc.com