eqc_models-0.9.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. eqc_models-0.9.8.data/platlib/compile_extensions.py +23 -0
  2. eqc_models-0.9.8.data/platlib/eqc_models/__init__.py +15 -0
  3. eqc_models-0.9.8.data/platlib/eqc_models/algorithms/__init__.py +4 -0
  4. eqc_models-0.9.8.data/platlib/eqc_models/algorithms/base.py +10 -0
  5. eqc_models-0.9.8.data/platlib/eqc_models/algorithms/penaltymultiplier.py +169 -0
  6. eqc_models-0.9.8.data/platlib/eqc_models/allocation/__init__.py +6 -0
  7. eqc_models-0.9.8.data/platlib/eqc_models/allocation/allocation.py +367 -0
  8. eqc_models-0.9.8.data/platlib/eqc_models/allocation/portbase.py +128 -0
  9. eqc_models-0.9.8.data/platlib/eqc_models/allocation/portmomentum.py +137 -0
  10. eqc_models-0.9.8.data/platlib/eqc_models/assignment/__init__.py +5 -0
  11. eqc_models-0.9.8.data/platlib/eqc_models/assignment/qap.py +82 -0
  12. eqc_models-0.9.8.data/platlib/eqc_models/assignment/setpartition.py +170 -0
  13. eqc_models-0.9.8.data/platlib/eqc_models/base/__init__.py +72 -0
  14. eqc_models-0.9.8.data/platlib/eqc_models/base/base.py +150 -0
  15. eqc_models-0.9.8.data/platlib/eqc_models/base/constraints.py +276 -0
  16. eqc_models-0.9.8.data/platlib/eqc_models/base/operators.py +201 -0
  17. eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.c +11363 -0
  18. eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
  19. eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.pyx +72 -0
  20. eqc_models-0.9.8.data/platlib/eqc_models/base/polynomial.py +274 -0
  21. eqc_models-0.9.8.data/platlib/eqc_models/base/quadratic.py +250 -0
  22. eqc_models-0.9.8.data/platlib/eqc_models/decoding.py +20 -0
  23. eqc_models-0.9.8.data/platlib/eqc_models/graph/__init__.py +5 -0
  24. eqc_models-0.9.8.data/platlib/eqc_models/graph/base.py +63 -0
  25. eqc_models-0.9.8.data/platlib/eqc_models/graph/hypergraph.py +307 -0
  26. eqc_models-0.9.8.data/platlib/eqc_models/graph/maxcut.py +155 -0
  27. eqc_models-0.9.8.data/platlib/eqc_models/graph/maxkcut.py +184 -0
  28. eqc_models-0.9.8.data/platlib/eqc_models/ml/__init__.py +15 -0
  29. eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierbase.py +99 -0
  30. eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierqboost.py +423 -0
  31. eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierqsvm.py +237 -0
  32. eqc_models-0.9.8.data/platlib/eqc_models/ml/clustering.py +323 -0
  33. eqc_models-0.9.8.data/platlib/eqc_models/ml/clusteringbase.py +112 -0
  34. eqc_models-0.9.8.data/platlib/eqc_models/ml/decomposition.py +363 -0
  35. eqc_models-0.9.8.data/platlib/eqc_models/ml/forecast.py +255 -0
  36. eqc_models-0.9.8.data/platlib/eqc_models/ml/forecastbase.py +139 -0
  37. eqc_models-0.9.8.data/platlib/eqc_models/ml/regressor.py +220 -0
  38. eqc_models-0.9.8.data/platlib/eqc_models/ml/regressorbase.py +97 -0
  39. eqc_models-0.9.8.data/platlib/eqc_models/ml/reservoir.py +106 -0
  40. eqc_models-0.9.8.data/platlib/eqc_models/sequence/__init__.py +5 -0
  41. eqc_models-0.9.8.data/platlib/eqc_models/sequence/tsp.py +217 -0
  42. eqc_models-0.9.8.data/platlib/eqc_models/solvers/__init__.py +12 -0
  43. eqc_models-0.9.8.data/platlib/eqc_models/solvers/qciclient.py +707 -0
  44. eqc_models-0.9.8.data/platlib/eqc_models/utilities/__init__.py +6 -0
  45. eqc_models-0.9.8.data/platlib/eqc_models/utilities/fileio.py +38 -0
  46. eqc_models-0.9.8.data/platlib/eqc_models/utilities/polynomial.py +137 -0
  47. eqc_models-0.9.8.data/platlib/eqc_models/utilities/qplib.py +375 -0
  48. eqc_models-0.9.8.dist-info/LICENSE.txt +202 -0
  49. eqc_models-0.9.8.dist-info/METADATA +139 -0
  50. eqc_models-0.9.8.dist-info/RECORD +52 -0
  51. eqc_models-0.9.8.dist-info/WHEEL +5 -0
  52. eqc_models-0.9.8.dist-info/top_level.txt +2 -0
eqc_models-0.9.8.data/platlib/eqc_models/ml/forecastbase.py
@@ -0,0 +1,139 @@
+ # (C) Quantum Computing Inc., 2024.
+ # Import libs
+ import numpy as np
+ import pandas as pd
+ from sklearn.metrics import (
+     mean_squared_error,
+     mean_absolute_percentage_error,
+ )
+
+
+ # Define a base class for forecast models
+ class BaseForecastModel:
+     """
+     A base class for forecast models.
+     """
+     def __init__(self):
+         pass
+
+     def prep_fea_targs(
+         self,
+         fea_data: np.array,
+         targ_data: np.array,
+         window_size: int = 1,
+         horizon_size: int = 1,
+     ):
+         num_records = fea_data.shape[0]
+
+         assert (
+             targ_data.shape[0] == num_records
+         ), "Inconsistent dimensions!"
+
+         step_vec = np.arange(num_records)
+
+         num_fea_dims = fea_data.shape[1]
+         num_targ_dims = targ_data.shape[1]
+
+         X = []
+         y = []
+         steps = []
+         for i in range(num_records - window_size - horizon_size + 1):
+             fea_seq = fea_data[i : i + window_size]
+             targ_seq = targ_data[
+                 i + window_size : i + window_size + horizon_size
+             ]
+
+             assert fea_seq.shape[0] == window_size
+             assert fea_seq.shape[1] == num_fea_dims
+             assert targ_seq.shape[0] == horizon_size
+             assert targ_seq.shape[1] == num_targ_dims
+
+             step_seq = step_vec[
+                 i + window_size : i + window_size + horizon_size
+             ]
+
+             assert step_seq.shape[0] == horizon_size
+
+             fea_seq = fea_seq.reshape((num_fea_dims * window_size))
+             targ_seq = targ_seq.reshape((num_targ_dims * horizon_size))
+             step_seq = step_seq.reshape((horizon_size,))
+
+             X.append(fea_seq)
+             y.append(targ_seq)
+             steps.append(step_seq)
+
+         X = np.array(X)
+         y = np.array(y)
+         steps = np.array(steps)
+
+         assert X.shape[0] == y.shape[0]
+         assert len(steps) == X.shape[0]
+
+         assert X.shape[1] == num_fea_dims * window_size
+         assert y.shape[1] == num_targ_dims * horizon_size
+
+         return X, y, steps
+
+     def prep_out_of_sample(
+         self,
+         fea_data: np.array,
+         window_size: int = 1,
+         horizon_size: int = 1,
+     ):
+         num_records = fea_data.shape[0]
+
+         num_fea_dims = fea_data.shape[1]
+
+         fea_seq = fea_data[num_records - window_size : num_records]
+
+         assert fea_seq.shape[0] == window_size
+         assert fea_seq.shape[1] == num_fea_dims
+
+         fea_seq = fea_seq.reshape((num_fea_dims * window_size))
+
+         X = np.array([fea_seq])
+
+         return X
+
+     def generate_pred_df(
+         self,
+         y: np.array,
+         y_pred: np.array,
+         dates: np.array,
+     ):
+         num_records = y.shape[0]
+         num_targ_dims = y.shape[1]
+
+         assert y_pred.shape[0] == num_records
+         assert y_pred.shape[1] == num_targ_dims
+         assert dates.shape[0] == num_records
+         assert dates.shape[1] == num_targ_dims
+
+         tmp_size = num_records * num_targ_dims
+         pred_df = pd.DataFrame(
+             {
+                 "Date": dates.reshape((tmp_size)),
+                 "Actual": y.reshape((tmp_size)),
+                 "Predicted": y_pred.reshape((tmp_size)),
+             }
+         )
+
+         pred_df = pred_df.groupby("Date", as_index=False)[
+             ["Actual", "Predicted"]
+         ].mean()
+
+         return pred_df
+
+     def get_stats(self, y, y_pred):
+         mape = mean_absolute_percentage_error(y, y_pred)
+         rmse = np.sqrt(mean_squared_error(y, y_pred))
+
+         stats_hash = {"MAPE": mape, "RMSE": rmse}
+
+         return stats_hash
+
+     def fit(self, data: pd.DataFrame):
+         pass
+
+     def predict(self, X: np.array):
+         pass
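For orientation, here is a brief usage sketch of the windowing helper above. It is illustrative only and not part of the wheel contents; the arrays are made up, and the expected shapes follow from the asserts inside prep_fea_targs.

import numpy as np

from eqc_models.ml.forecastbase import BaseForecastModel

# Toy series: 6 records, 2 feature columns, 1 target column (illustrative only).
fea = np.arange(12, dtype=float).reshape(6, 2)
targ = np.arange(6, dtype=float).reshape(6, 1)

model = BaseForecastModel()
X, y, steps = model.prep_fea_targs(fea, targ, window_size=2, horizon_size=1)

# 6 - 2 - 1 + 1 = 4 sliding windows; each X row flattens 2 records x 2 features.
assert X.shape == (4, 4)
assert y.shape == (4, 1)
assert steps.shape == (4, 1)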
eqc_models-0.9.8.data/platlib/eqc_models/ml/regressor.py
@@ -0,0 +1,220 @@
+ # (C) Quantum Computing Inc., 2024.
+ # Import libs
+ import os
+ import sys
+ import time
+ import datetime
+ import json
+ import warnings
+ from functools import wraps
+ import numpy as np
+ from sklearn.preprocessing import MinMaxScaler
+
+ from eqc_models.ml.regressorbase import RegressorBase
+
+
+ def timer(func):
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         beg_time = time.time()
+         val = func(*args, **kwargs)
+         end_time = time.time()
+         tot_time = end_time - beg_time
+
+         print(
+             "Runtime of %s: %0.2f seconds!"
+             % (
+                 func.__name__,
+                 tot_time,
+             )
+         )
+
+         return val
+
+     return wrapper
+
+
+ class LinearRegression(RegressorBase):
+     """An implementation of linear regression that uses QCi's Dirac-3.
+
+     Parameters
+     ----------
+
+     relaxation_schedule: Relaxation schedule used by Dirac-3;
+     default: 2.
+
+     num_samples: Number of samples used by Dirac-3; default: 1.
+
+     l2_reg_coef: L2 regularization penalty multiplier; default: 0.
+
+     alpha: A penalty multiplier to ensure the correct sign of a
+     model parameter; default: 0.
+
+     Examples
+     ---------
+
+     >>> X_train = np.array([[1], [2], [3], [4], [5]])
+     >>> y_train = np.array([3, 5, 7, 9, 11])
+     >>> X_test = np.array([[6], [7], [8]])
+     >>> y_test = np.array([13, 15, 17])
+     >>> from eqc_models.ml.regressor import LinearRegression
+     >>> from contextlib import redirect_stdout
+     >>> import io
+     >>> f = io.StringIO()
+     >>> with redirect_stdout(f):
+     ...     model = LinearRegression()
+     ...     model = model.fit(X_train, y_train)
+     ...     y_pred_train = model.predict(X_train)
+     ...     y_pred_test = model.predict(X_test)
+     """
+
+     def __init__(
+         self,
+         relaxation_schedule=2,
+         num_samples=1,
+         l2_reg_coef=0,
+         alpha=0,
+     ):
+         super(LinearRegression, self).__init__()
+
+         self.relaxation_schedule = relaxation_schedule
+         self.num_samples = num_samples
+         self.l2_reg_coef = l2_reg_coef
+         self.alpha = alpha
+         self.params = None
+         self.fit_intercept = None
+         self.resp_transformer = None
+         self.fea_transformer = None
+
+     @timer
+     def fit(self, X, y, fit_intercept=True):
+         """Trains a linear regression from the training set (X, y).
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features)
+             The training input samples.
+
+         y : array-like of shape (n_samples,)
+             The target values.
+
+         fit_intercept: A boolean indicating whether an intercept
+         should be fit; default: True.
+
+         Returns
+         -------
+         self : object
+             Fitted estimator.
+
+         """
+
+         assert X.shape[0] == y.shape[0], "Inconsistent sizes!"
+
+         self.fea_transformer = MinMaxScaler(feature_range=(0, 1))
+         X = self.fea_transformer.fit_transform(X)
+
+         self.resp_transformer = MinMaxScaler(feature_range=(0, 1))
+         y = self.resp_transformer.fit_transform(y.reshape(-1, 1)).reshape(
+             -1
+         )
+
+         self.fit_intercept = fit_intercept
+
+         n_records = X.shape[0]
+         if fit_intercept:
+             X = np.concatenate([X, np.ones((n_records, 1))], axis=1)
+
+         n_features = X.shape[1]
+
+         X = np.concatenate([X, -X], axis=1)
+
+         n_dims = X.shape[1]
+
+         assert n_dims == 2 * n_features, "Internal error!"
+
+         J, C, sum_constraint = self.get_hamiltonian(X, y)
+
+         assert J.shape[0] == J.shape[1], "Inconsistent hamiltonian size!"
+         assert J.shape[0] == C.shape[0], "Inconsistent hamiltonian size!"
+
+         self.set_model(J, C, sum_constraint)
+
+         sol = self.solve()
+
+         assert len(sol) == C.shape[0], "Inconsistent solution size!"
+
+         self.params = self.convert_sol_to_params(sol)
+
+         return self
+
+     @timer
+     def predict(self, X: np.array):
+         """
+         Predicts output of the regressor for input X.
+
+         Parameters
+         ----------
+         X : {array-like, sparse matrix} of shape (n_samples, n_features)
+
+         Returns
+         -------
+         y : ndarray of shape (n_samples,)
+             The predicted raw output of the regressor.
+         """
+
+         if self.params is None:
+             return
+
+         X = self.fea_transformer.transform(X)
+
+         n_records = X.shape[0]
+         if self.fit_intercept:
+             X = np.concatenate([X, np.ones((n_records, 1))], axis=1)
+
+         X = np.concatenate([X, -X], axis=1)
+
+         assert X.shape[1] == len(
+             self.params
+         ), "Inconsistent dimension of X!"
+
+         y = X @ self.params
+
+         assert y.shape[0] == X.shape[0], "Internal error!"
+
+         y = self.resp_transformer.inverse_transform(
+             y.reshape(-1, 1)
+         ).reshape(-1)
+
+         return y
+
+     @timer
+     def get_hamiltonian(
+         self,
+         X: np.array,
+         y: np.array,
+     ):
+         n_dims = X.shape[1]
+
+         J = np.zeros(shape=(n_dims, n_dims), dtype=np.float32)
+         C = np.zeros(shape=(n_dims,), dtype=np.float32)
+
+         for i in range(n_dims):
+             for j in range(n_dims):
+                 J[i][j] = np.sum(X.swapaxes(0, 1)[i] * X.swapaxes(0, 1)[j])
+                 if i == j:
+                     J[i][j] += self.l2_reg_coef
+
+             C[i] = -2.0 * np.sum(y * X.swapaxes(0, 1)[i])
+
+         C = C.reshape((n_dims, 1))
+
+         # Add sign penalty multiplier
+         n_features = int(n_dims / 2)
+         for i in range(n_features):
+             J[i][i + n_features] += self.alpha
+             J[i + n_features][i] += self.alpha
+
+         return J, C, 1.0
+
+     def convert_sol_to_params(self, sol):
+         return np.array(sol)
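In fit above, X is concatenated with -X so that nonnegative device variables can represent signed coefficients, and the quadratic form that get_hamiltonian builds (before the L2 and sign-penalty terms) is ordinary least squares. A small standalone check of that identity, illustrative only and not part of the package, with made-up data:

import numpy as np

# For J = X^T X and C = -2 X^T y, the quadratic form w^T J w + C . w
# equals ||X w - y||^2 - y^T y for any w.
rng = np.random.default_rng(0)
X = rng.normal(size=(10, 3))
y = rng.normal(size=10)
w = rng.normal(size=3)

J = X.T @ X
C = -2.0 * X.T @ y

quad_form = w @ J @ w + C @ w
least_squares = np.sum((X @ w - y) ** 2) - y @ y
assert np.isclose(quad_form, least_squares)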
eqc_models-0.9.8.data/platlib/eqc_models/ml/regressorbase.py
@@ -0,0 +1,97 @@
+ # (C) Quantum Computing Inc., 2024.
+ # Import libs
+ import os
+ import sys
+ import time
+ import datetime
+ import json
+ import warnings
+ from functools import wraps
+ import numpy as np
+
+ from eqc_models import QuadraticModel
+ from eqc_models.solvers.qciclient import Dirac3CloudSolver
+
+
+ class RegressorBase(QuadraticModel):
+     def __init__(
+         self,
+         relaxation_schedule=2,
+         num_samples=1,
+     ):
+
+         super(RegressorBase, self).__init__(None, None, None)
+
+         self.relaxation_schedule = relaxation_schedule
+         self.num_samples = num_samples
+         self.params = None
+
+     def predict(self, X: np.array):
+         pass
+
+     def get_hamiltonian(
+         self,
+         X: np.array,
+         y: np.array,
+     ):
+         pass
+
+     def set_model(self, J, C, sum_constraint):
+
+         # Set hamiltonians
+         self._C = C
+         self._J = J
+         self._H = C, J
+         self._sum_constraint = sum_constraint
+
+         # Set domains
+         num_variables = C.shape[0]
+         self.domains = sum_constraint * np.ones((num_variables,))
+
+         return
+
+     def solve(self):
+         solver = Dirac3CloudSolver()
+         response = solver.solve(
+             self,
+             sum_constraint=self._sum_constraint,
+             relaxation_schedule=self.relaxation_schedule,
+             solution_precision=1,
+             num_samples=self.num_samples,
+         )
+
+         min_id = np.argmin(response["results"]["energies"])
+
+         sol = response["results"]["solutions"][min_id]
+
+         print(response)
+
+         return sol
+
+     def convert_sol_to_params(self, sol):
+         pass
+
+     def fit(self, X, y):
+         return self
+
+     def get_dynamic_range(self):
+         C = self._C
+         J = self._J
+
+         if C is None:
+             return
+
+         if J is None:
+             return
+
+         absc = np.abs(C)
+         absj = np.abs(J)
+         minc = np.min(absc[absc > 0])
+         maxc = np.max(absc)
+         minj = np.min(absj[absj > 0])
+         maxj = np.max(absj)
+
+         minval = min(minc, minj)
+         maxval = max(maxc, maxj)
+
+         return 10 * np.log10(maxval / minval)
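get_dynamic_range above reports the spread between the largest and smallest nonzero Hamiltonian coefficients in decibels. A standalone numeric sketch of the same calculation, with made-up coefficient values rather than anything from the package:

import numpy as np

# With smallest nonzero |coefficient| 0.01 and largest 100,
# the dynamic range is 10 * log10(100 / 0.01) = 40 dB.
C = np.array([[0.01], [-100.0], [0.0]])
J = np.array([[1.0, 0.0], [0.0, 2.0]])

vals = np.concatenate([np.abs(C).ravel(), np.abs(J).ravel()])
nonzero = vals[vals > 0]
dynamic_range_db = 10 * np.log10(nonzero.max() / nonzero.min())
assert np.isclose(dynamic_range_db, 40.0)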
eqc_models-0.9.8.data/platlib/eqc_models/ml/reservoir.py
@@ -0,0 +1,106 @@
+ import numpy as np
+ from emucore_direct.client import EmuCoreClient
+
+ # Parameters
+ VBIAS = 0.31
+ GAIN = 0.72
+ FEATURE_SCALING = 0.1
+ DENSITY = 1
+
+ class QciReservoir:
+     """
+     A class designed as an interface to QCi's reservoir devices.
+
+     Parameters
+     ----------
+
+     ip_addr: The IP address of the device.
+
+     num_nodes: Number of reservoir network nodes.
+
+     vbias: Bias of the reservoir device; default: 0.31.
+
+     gain: Gain of the reservoir device; default: 0.72.
+
+     density: Density used for normalization of the reservoir
+     output; default: 1 (no normalization done).
+
+     feature_scaling: The factor used to scale the reservoir output; default: 0.1.
+
+     device: The QCi reservoir device. Currently only 'EmuCore' is
+     supported; default: EmuCore.
+
+     """
+
+     def __init__(
+         self,
+         ip_addr: str,
+         num_nodes: int,
+         vbias: float = VBIAS,
+         gain: float = GAIN,
+         density: float = DENSITY,
+         feature_scaling: float = FEATURE_SCALING,
+         device: str = "EmuCore",
+     ):
+         assert device == "EmuCore", "Unknown device!"
+
+         self.ip_addr = ip_addr
+         self.num_nodes = num_nodes
+         self.vbias = vbias
+         self.gain = gain
+         self.density = density
+         self.feature_scaling = feature_scaling
+         self.device = device
+         self.client = None
+         self.lock_id = None
+
+     def init_reservoir(self):
+         self.client = EmuCoreClient(ip_addr=self.ip_addr)
+
+         self.lock_id, _, _ = self.client.wait_for_lock()
+
+         self.client.reservoir_reset(lock_id=self.lock_id)
+
+         self.client.rc_config(
+             lock_id=self.lock_id,
+             vbias=self.vbias,
+             gain=self.gain,
+             num_nodes=self.num_nodes,
+             num_taps=self.num_nodes,
+         )
+
+     def release_lock(self):
+         self.client.release_lock(lock_id=self.lock_id)
+
+     def push_reservoir(self, X):
+
+         assert self.client is not None, "The reservoir should be initialized!"
+         assert self.lock_id is not None, "The reservoir should be initialized!"
+
+         X_resp, _, _ = self.client.process_all_data(
+             input_data=X,
+             num_nodes=self.num_nodes,
+             density=self.density,
+             feature_scaling=self.feature_scaling,
+             lock_id=self.lock_id,
+         )
+
+         return X_resp
+
+     def run_reservoir(self, X_train, X_test=None):
+
+         if X_test is not None:
+             assert X_train.shape[1] == X_test.shape[1]
+
+         num_feas = X_train.shape[1]
+
+         X_resp_train = self.push_reservoir(X_train)
+
+         X_resp_test = None
+         if X_test is not None:
+             X_resp_test = self.push_reservoir(X_test)
+
+         return X_resp_train, X_resp_test
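A minimal usage sketch for QciReservoir, based only on the class shown above. The IP address and input data are hypothetical, and running it requires the emucore_direct client plus a reachable EmuCore device:

import numpy as np

from eqc_models.ml.reservoir import QciReservoir

# Hypothetical device address and toy input (illustrative only).
reservoir = QciReservoir(ip_addr="192.168.0.10", num_nodes=100)

reservoir.init_reservoir()                 # acquire the device lock and configure it
try:
    X = np.random.rand(50, 4)              # 50 samples, 4 features
    X_resp = reservoir.push_reservoir(X)   # reservoir-transformed features
finally:
    reservoir.release_lock()               # always free the device lock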
eqc_models-0.9.8.data/platlib/eqc_models/sequence/__init__.py
@@ -0,0 +1,5 @@
+ # (C) Quantum Computing Inc., 2024.
+
+ from .tsp import MTZTSPModel
+
+ __all__ = ["MTZTSPModel"]