eqc-models 0.13.0__py3-none-any.whl → 0.14.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/algorithms/penaltymultiplier.py +0 -1
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/assignment/resource.py +1 -1
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/polyeval.c +122 -122
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/results.py +18 -16
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/classifierbase.py +0 -4
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/classifierqboost.py +114 -72
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/clustering.py +29 -1
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/clusteringbase.py +29 -6
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/decomposition.py +50 -10
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/regressor.py +14 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/regressorbase.py +24 -5
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/reservoir.py +17 -2
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/solvers/__init__.py +1 -1
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/solvers/eqcdirect.py +18 -2
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/solvers/mip.py +4 -4
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/utilities/__init__.py +2 -1
- eqc_models-0.14.1.data/platlib/eqc_models/utilities/general.py +83 -0
- {eqc_models-0.13.0.dist-info → eqc_models-0.14.1.dist-info}/METADATA +6 -4
- eqc_models-0.14.1.dist-info/RECORD +70 -0
- eqc_models-0.13.0.dist-info/RECORD +0 -69
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/compile_extensions.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/__init__.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/algorithms/__init__.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/algorithms/base.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/allocation/__init__.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/allocation/allocation.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/allocation/portbase.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/allocation/portmomentum.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/assignment/__init__.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/assignment/qap.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/assignment/setpartition.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/__init__.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/base.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/binaries.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/constraints.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/operators.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/polyeval.pyx +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/polynomial.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/base/quadratic.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/combinatorics/__init__.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/combinatorics/setcover.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/combinatorics/setpartition.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/decoding.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/graph/__init__.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/graph/base.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/graph/hypergraph.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/graph/maxcut.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/graph/maxkcut.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/graph/partition.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/graph/rcshortestpath.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/graph/shortestpath.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/__init__.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/classifierqsvm.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/cvqboost_hamiltonian.pyx +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/cvqboost_hamiltonian_c_func.c +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/cvqboost_hamiltonian_c_func.h +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/forecast.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/ml/forecastbase.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/process/base.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/process/mpc.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/sequence/__init__.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/sequence/tsp.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/solvers/qciclient.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/solvers/responselog.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/utilities/fileio.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/utilities/polynomial.py +0 -0
- {eqc_models-0.13.0.data → eqc_models-0.14.1.data}/platlib/eqc_models/utilities/qplib.py +0 -0
- {eqc_models-0.13.0.dist-info → eqc_models-0.14.1.dist-info}/WHEEL +0 -0
- {eqc_models-0.13.0.dist-info → eqc_models-0.14.1.dist-info}/licenses/LICENSE.txt +0 -0
- {eqc_models-0.13.0.dist-info → eqc_models-0.14.1.dist-info}/top_level.txt +0 -0
eqc_models/base/polyeval.cpython-310-darwin.so

Binary file differs
eqc_models/base/results.py

@@ -153,14 +153,19 @@ class SolutionResults:
         solutions = np.array(new_solutions)
         if hasattr(model, "evaluateObjective"):
             objectives = np.zeros((solutions.shape[0],), dtype=np.float32)
-            for i in range(solutions.shape[0]):
-                try:
-                    objective = model.evaluateObjective(solutions[i])
-                except NotImplementedError:
-                    warnings.warn(f"Cannot set objective value in results for {model.__class__}")
-                    objectives = None
-                    break
-                objectives[i] = objective
+            try:
+                objectives[:] = model.evaluateObjective(solutions)
+            except NotImplementedError:
+                warnings.warn(f"Cannot evaluate objective value in results for {model.__class__}. Method not implemented.")
+                objectives = None
+            # for i in range(solutions.shape[0]):
+            #     try:
+            #         objective = model.evaluateObjective(solutions[i])
+            #     except NotImplementedError:
+            #         warnings.warn(f"Cannot set objective value in results for {model.__class__}")
+            #         objectives = None
+            #         break
+            #         objectives[i] = objective
         else:
             objectives = None
         if hasattr(model, "evaluatePenalties"):
@@ -217,14 +222,11 @@ class SolutionResults:
         device_type = info_dict["device_type"]
         if hasattr(model, "evaluateObjective"):
             objectives = np.zeros((solutions.shape[0],), dtype=np.float32)
-            for i in range(solutions.shape[0]):
-                try:
-                    objective = model.evaluateObjective(solutions[i])
-                except NotImplementedError:
-                    warnings.warn(f"Cannot set objective value in results for {model.__class__}")
-                    objectives = None
-                    break
-                objectives[i] = objective
+            try:
+                objectives[:] = model.evaluateObjective(solutions)
+            except NotImplementedError:
+                warnings.warn(f"Cannot evaluate objective value in results for {model.__class__}. Method not implemented.")
+                objectives = None
         else:
             objectives = None
         if hasattr(model, "evaluatePenalties"):
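
Both hunks above replace a per-solution loop with a single vectorized call to evaluateObjective, falling back to objectives = None when a model does not implement the method. A minimal self-contained sketch of the same pattern (the two model classes here are stand-ins for illustration, not eqc_models classes):

```python
import warnings
import numpy as np

class VectorizedModel:
    """Stand-in model whose evaluateObjective accepts a 2-D solutions array."""
    def evaluateObjective(self, solutions):
        return solutions.sum(axis=1)

class UnsupportedModel:
    """Stand-in model that does not support objective evaluation."""
    def evaluateObjective(self, solutions):
        raise NotImplementedError

def collect_objectives(model, solutions):
    # Mirrors the 0.14.1 logic: one vectorized call, None on NotImplementedError.
    objectives = np.zeros((solutions.shape[0],), dtype=np.float32)
    try:
        objectives[:] = model.evaluateObjective(solutions)
    except NotImplementedError:
        warnings.warn(f"Cannot evaluate objective value in results for {model.__class__}.")
        objectives = None
    return objectives

solutions = np.array([[1.0, 2.0], [3.0, 4.0]])
print(collect_objectives(VectorizedModel(), solutions))   # [3. 7.]
print(collect_objectives(UnsupportedModel(), solutions))  # None (with a warning)
```

The vectorized call relies on evaluateObjective accepting the full (n_solutions, n_variables) array at once, which is what the new code assumes.
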
eqc_models/ml/classifierbase.py

@@ -31,10 +31,6 @@ class ClassifierBase(QuadraticModel):

         assert solver_access in ["cloud", "direct"]

-        if solver_access == "direct":
-            assert ip_addr is not None, "ip_addr should be set when using direct solver!"
-            assert port is not None,"port should be set when using direct solver!"
-
         self.solver_access = solver_access
         self.ip_addr = ip_addr
         self.port = port
eqc_models/ml/classifierqboost.py

@@ -16,9 +16,16 @@ from sklearn.naive_bayes import GaussianNB
 from sklearn.linear_model import LogisticRegression
 from sklearn.gaussian_process import GaussianProcessClassifier
 from sklearn.gaussian_process.kernels import RBF
-
+from sklearn.neighbors import KNeighborsClassifier
+from sklearn.discriminant_analysis import (
+    LinearDiscriminantAnalysis,
+    QuadraticDiscriminantAnalysis,
+)
+from lightgbm import LGBMClassifier
+from xgboost import XGBClassifier
 from eqc_models.ml.classifierbase import ClassifierBase
-
+
+# from eqc_models.ml.cvqboost_hamiltonian import get_hamiltonian_pyx


 def timer(func):

@@ -48,8 +55,7 @@ class WeakClassifier:
         X_train,
         y_train,
         weak_cls_type,
-
-        min_samples_split=100,
+        weak_cls_params={},
         num_jobs=1,
     ):
         assert X_train.shape[0] == len(y_train)

@@ -58,30 +64,34 @@ class WeakClassifier:
         self.y_train = y_train

         if weak_cls_type == "dct":
-            self.clf = DecisionTreeClassifier(
-                max_depth=max_depth,
-                min_samples_split=min_samples_split,
-                random_state=0,
-            )
+            self.clf = DecisionTreeClassifier(**weak_cls_params)
         elif weak_cls_type == "nb":
-            self.clf = GaussianNB()
+            self.clf = GaussianNB(**weak_cls_params)
         elif weak_cls_type == "lg":
-            self.clf = LogisticRegression(
+            self.clf = LogisticRegression(**weak_cls_params)
         elif weak_cls_type == "gp":
-            self.clf = GaussianProcessClassifier(
-
-
-
+            self.clf = GaussianProcessClassifier(**weak_cls_params)
+        elif weak_cls_type == "knn":
+            self.clf = KNeighborsClassifier(**weak_cls_params)
+        elif weak_cls_type == "lda":
+            self.clf = LinearDiscriminantAnalysis(**weak_cls_params)
+        elif weak_cls_type == "qda":
+            self.clf = QuadraticDiscriminantAnalysis(**weak_cls_params)
+        elif weak_cls_type == "lgb":
+            self.clf = LGBMClassifier(**weak_cls_params)
+        elif weak_cls_type == "xgb":
+            self.clf = XGBClassifier(**weak_cls_params)
         else:
             assert False, (
                 "Unknown weak classifier type <%s>!" % weak_cls_type
             )

     def train(self):
-        self.clf.fit(self.X_train, self.y_train)
+        self.clf.fit(self.X_train, np.where(self.y_train == -1, 0, 1))

     def predict(self, X):
-
+        y_pred = self.clf.predict(X)
+        return np.where(y_pred == 0, -1, 1)


 class QBoostClassifier(ClassifierBase):
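
WeakClassifier now builds any of the supported scikit-learn, LightGBM, or XGBoost estimators from a single weak_cls_params dict, and remaps the ±1 labels used by QBoost to the 0/1 labels those estimators expect before fitting, then maps predictions back. A small stand-alone illustration of that label remapping, using scikit-learn's LogisticRegression as the wrapped estimator (the toy data is illustrative only):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0], [0.2], [0.8], [1.0]])
y_pm1 = np.array([-1, -1, 1, 1])             # QBoost-style labels in {-1, +1}

clf = LogisticRegression(C=1.0)              # parameters would come from weak_cls_params
clf.fit(X, np.where(y_pm1 == -1, 0, 1))      # train on {0, 1}, as WeakClassifier.train does

y_pred01 = clf.predict(X)
y_pred_pm1 = np.where(y_pred01 == 0, -1, 1)  # map back to {-1, +1}, as predict does
print(y_pred_pm1)                            # [-1 -1  1  1]
```
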
@@ -97,9 +107,9 @@ class QBoostClassifier(ClassifierBase):
     solver_access: Solver access type: cloud or direct; default: cloud.

     ip_addr: IP address of the device when direct access is used; default: None.
-
-    port: Port number of the device when direct access is used; default: None.
-
+
+    port: Port number of the device when direct access is used; default: None.
+
     lambda_coef: A penalty multiplier; default: 0.

     weak_cls_schedule: Weak classifier schedule. Is either 1, 2,

@@ -110,15 +120,24 @@ class QBoostClassifier(ClassifierBase):
         - nb: Naive Baysian classifier
         - lg: Logistic regression
         - gp: Gaussian process classifier
+        - knn: K-nearest neighbors classifier
+        - lda: Linear discriminant analysis classifier
+        - qda: Quadratic discriminant analysis classifier
+        - lgb: Light-GBM classifier
+        - xgb: XGBoost classifier

-        default:
+        default: lg.

-
-
+    weak_cls_params: Dict of weak classifier parameters. Default: {};
+        use default parameters.

-
-
-
+    weak_cls_strategy: Computation strategy for weak classifier
+        training, either "sequential" or "multi_processing". Default:
+        "multi_processing".
+
+    weak_cls_num_jobs: Number of jobs when
+        weak_cls_strategy="multi_processing". Default: None; use all
+        available cores.

     Examples
     -----------

@@ -155,6 +174,7 @@ class QBoostClassifier(ClassifierBase):
     ... obj.fit(X_train, y_train)
     ... y_train_prd = obj.predict(X_train)
     ... y_test_prd = obj.predict(X_test)
+
     """

     def __init__(

@@ -165,24 +185,35 @@ class QBoostClassifier(ClassifierBase):
         ip_addr=None,
         port=None,
         lambda_coef=0,
-        weak_cls_schedule=
+        weak_cls_schedule=1,
         weak_cls_type="lg",
-
-        weak_min_samples_split=100,
+        weak_cls_params={},
         weak_cls_strategy="multi_processing",
         weak_cls_num_jobs=None,
+        weak_cls_pair_count=None,
     ):
         super(QBoostClassifier).__init__()

         assert weak_cls_schedule in [1, 2, 3]
-        assert weak_cls_type in [
+        assert weak_cls_type in [
+            "dct",
+            "nb",
+            "lg",
+            "gp",
+            "knn",
+            "lda",
+            "qda",
+            "lgb",
+            "xgb",
+        ]
+
         assert weak_cls_strategy in [
             "multi_processing",
             "multi_processing_shm",
             "sequential",
         ]
         assert solver_access in ["cloud", "direct"]
-
+
         self.relaxation_schedule = relaxation_schedule
         self.num_samples = num_samples
         self.solver_access = solver_access
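
Taken together, the docstring and constructor changes above add new weak classifier types, a pass-through weak_cls_params dict, and a weak_cls_pair_count limit. A hypothetical usage sketch based only on the parameter names shown in this diff; the toy data and parameter values are illustrative, lightgbm and xgboost must be installed for the module to import, and fitting still requires access to a Dirac device (cloud credentials or a direct connection):

```python
import numpy as np
from eqc_models.ml.classifierqboost import QBoostClassifier

# Toy two-class data with labels in {-1, +1}; values are illustrative only.
rng = np.random.default_rng(0)
X_train = rng.normal(size=(40, 6))
y_train = np.where(X_train[:, 0] + X_train[:, 1] > 0, 1, -1)

obj = QBoostClassifier(
    relaxation_schedule=2,
    num_samples=1,
    lambda_coef=0,
    weak_cls_schedule=1,
    weak_cls_type="xgb",                                    # one of the newly supported types
    weak_cls_params={"n_estimators": 20, "max_depth": 2},   # passed through to XGBClassifier
)

# Fitting and predicting require access to a Dirac device (cloud or direct).
obj.fit(X_train, y_train)
y_pred = obj.predict(X_train, threshold=0)
```
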
@@ -191,18 +222,29 @@ class QBoostClassifier(ClassifierBase):
         self.lambda_coef = lambda_coef
         self.weak_cls_schedule = weak_cls_schedule
         self.weak_cls_type = weak_cls_type
-        self.
-        self.weak_min_samples_split = weak_min_samples_split
+        self.weak_cls_params = weak_cls_params
         self.weak_cls_strategy = weak_cls_strategy
         if weak_cls_num_jobs is None or weak_cls_num_jobs <= 0:
             self.weak_cls_num_jobs = os.cpu_count()
         else:
             self.weak_cls_num_jobs = int(weak_cls_num_jobs)
+        self.weak_cls_pair_count = weak_cls_pair_count

         self.h_list = []
         self.ind_list = []
         self.classes_ = None

+    def topNPairs(self, X, n):
+        assert n < X.shape[1]*(X.shape[1]-1)
+        cov = np.corrcoef(X, rowvar=False)
+        abscov = np.abs(cov)
+        flatcov = []
+        for i in range(X.shape[1]):
+            for j in range(i+1, X.shape[1]):
+                flatcov.append((abscov[i, j], (i, j)))
+        flatcov.sort()
+        return [idx for val, idx in flatcov[-n:]]
+
     @timer
     def _build_weak_classifiers_sq(self, X, y):
         n_records = X.shape[0]
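
The new topNPairs helper ranks feature pairs by the absolute value of their Pearson correlation (via np.corrcoef) and keeps the n highest-ranked pairs for the pairwise weak classifiers. The same logic as a stand-alone function, with a small synthetic check:

```python
import numpy as np

def top_n_pairs(X, n):
    """Return the n feature-index pairs with the largest absolute correlation."""
    assert n < X.shape[1] * (X.shape[1] - 1)
    corr = np.abs(np.corrcoef(X, rowvar=False))    # |Pearson correlation| between columns
    pairs = [
        (corr[i, j], (i, j))
        for i in range(X.shape[1])
        for j in range(i + 1, X.shape[1])
    ]
    pairs.sort()                                    # ascending by |correlation|
    return [idx for _, idx in pairs[-n:]]           # keep the n strongest pairs

X = np.random.default_rng(1).normal(size=(100, 4))
X[:, 3] = 2.0 * X[:, 0] + 0.1 * X[:, 1]             # make columns 0 and 3 strongly correlated
print(top_n_pairs(X, 2))                            # (0, 3) should be among the returned pairs
```
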
@@ -221,26 +263,32 @@ class QBoostClassifier(ClassifierBase):
                 X[:, [l]],
                 y,
                 self.weak_cls_type,
-                self.
-                self.weak_min_samples_split,
+                self.weak_cls_params,
             )
             weak_classifier.train()
             self.ind_list.append([l])
             self.h_list.append(weak_classifier)

         if self.weak_cls_schedule >= 2:
-
-
-
-
-
-
-
-
-
-
-
-
+            """
+            Use up to weak_cls_pair_count pairs, ordered by absolute covariance
+
+            """
+            if self.weak_cls_pair_count is None:
+                weak_cls_pair_count = n_dims * (n_dims - 1)
+            else:
+                weak_cls_pair_count = self.weak_cls_pair_count
+            pairs = self.topNPairs(X, weak_cls_pair_count)
+            for i, j in pairs:
+                weak_classifier = WeakClassifier(
+                    X[:, [i, j]],
+                    y,
+                    self.weak_cls_type,
+                    welf.weak_cls_params
+                )
+                weak_classifier.train()
+                self.ind_list.append([i, j])
+                self.h_list.append(weak_classifier)

         if self.weak_cls_schedule >= 3:
             for i in range(n_dims):

@@ -250,8 +298,7 @@ class QBoostClassifier(ClassifierBase):
                         X[:, [i, j, k]],
                         y,
                         self.weak_cls_type,
-                        self.
-                        self.weak_min_samples_split,
+                        self.weak_cls_params,
                     )
                     weak_classifier.train()
                     self.ind_list.append([i, j, k])

@@ -267,16 +314,14 @@ class QBoostClassifier(ClassifierBase):
         n_records,
         n_dims,
         weak_cls_type,
-
-        weak_min_samples_split,
+        weak_cls_params,
     ):
         # Train the weak classifier
         weak_classifier = WeakClassifier(
             X_subset,
             y,
             weak_cls_type,
-
-            weak_min_samples_split,
+            weak_cls_params,
         )
         weak_classifier.train()

@@ -307,8 +352,7 @@ class QBoostClassifier(ClassifierBase):
                     n_records,
                     n_dims,
                     self.weak_cls_type,
-                    self.
-                    self.weak_min_samples_split,
+                    self.weak_cls_params,
                 )
             )

@@ -323,8 +367,7 @@ class QBoostClassifier(ClassifierBase):
                     n_records,
                     n_dims,
                     self.weak_cls_type,
-                    self.
-                    self.weak_min_samples_split,
+                    self.weak_cls_params,
                 )
             )

@@ -340,8 +383,7 @@ class QBoostClassifier(ClassifierBase):
                     n_records,
                     n_dims,
                     self.weak_cls_type,
-                    self.
-                    self.weak_min_samples_split,
+                    self.weak_cls_params,
                 )
             )

@@ -367,8 +409,7 @@ class QBoostClassifier(ClassifierBase):
         n_records,
         n_dims,
         weak_cls_type,
-
-        weak_min_samples_split,
+        weak_cls_params,
     ):
         """Train a weak classifier using shared memory."""

@@ -386,8 +427,7 @@ class QBoostClassifier(ClassifierBase):
             X_subset,
             y_shared,
             weak_cls_type,
-
-            weak_min_samples_split,
+            weak_cls_params,
         )
         weak_classifier.train()

@@ -437,8 +477,7 @@ class QBoostClassifier(ClassifierBase):
                     n_records,
                     n_dims,
                     self.weak_cls_type,
-                    self.
-                    self.weak_min_samples_split,
+                    self.weak_cls_params,
                 )
             )

@@ -454,8 +493,7 @@ class QBoostClassifier(ClassifierBase):
                     n_records,
                     n_dims,
                     self.weak_cls_type,
-                    self.
-                    self.weak_min_samples_split,
+                    self.weak_cls_params,
                 )
             )

@@ -472,8 +510,7 @@ class QBoostClassifier(ClassifierBase):
                     n_records,
                     n_dims,
                     self.weak_cls_type,
-                    self.
-                    self.weak_min_samples_split,
+                    self.weak_cls_params,
                 )
             )

@@ -581,7 +618,7 @@ class QBoostClassifier(ClassifierBase):

         return y

-    def predict(self, X: np.array):
+    def predict(self, X: np.array, threshold=0):
         """
         Predict classes for X.

@@ -589,6 +626,8 @@ class QBoostClassifier(ClassifierBase):
         ----------
         X : {array-like, sparse matrix} of shape (n_samples, n_features)

+        threshold: Prediction threshold.
+
         Returns
         -------
         y : ndarray of shape (n_samples,)

@@ -596,7 +635,9 @@ class QBoostClassifier(ClassifierBase):
         """

         y = self.predict_raw(X)
-
+
+        y[y < threshold] = -1
+        y[y >= threshold] = 1

         return y

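
predict now applies a configurable decision threshold to the raw boosted score before returning ±1 labels. The thresholding step in isolation, with illustrative scores:

```python
import numpy as np

def threshold_labels(raw_scores, threshold=0):
    """Map raw boosted scores to {-1, +1} labels, as QBoostClassifier.predict now does."""
    y = np.asarray(raw_scores, dtype=float).copy()
    y[y < threshold] = -1
    y[y >= threshold] = 1
    return y

print(threshold_labels([-0.4, 0.1, 0.7], threshold=0))    # [-1.  1.  1.]
print(threshold_labels([-0.4, 0.1, 0.7], threshold=0.5))  # [-1. -1.  1.]
```
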
@@ -628,9 +669,10 @@ class QBoostClassifier(ClassifierBase):
         )

         J = np.tensordot(h_vals, h_vals, axes=(1, 1))
+        J = J.astype(np.float64)
         J += np.diag(self.lambda_coef * np.ones((n_classifiers)))
         C = -2.0 * np.tensordot(h_vals, y, axes=(1, 0))
-
+
         # J, C = get_hamiltonian_pyx(y, h_vals, self.lambda_coef, n_records)

         C = C.reshape((n_classifiers, 1))
eqc_models/ml/clustering.py

@@ -24,6 +24,12 @@ class GraphClustering(ClusteringBase):

     device: The device used, dirac-1 or dirac-3; default: dirac-3.

+    solver_access: Solver access type: cloud or direct; default: cloud.
+
+    ip_addr: IP address of the device when direct access is used; default: None.
+
+    port: Port number of the device when direct access is used; default: None.
+
     Examples
     ---------

@@ -64,11 +70,16 @@ class GraphClustering(ClusteringBase):
         relaxation_schedule=2,
         num_samples=1,
         device="dirac-3",
+        solver_access="cloud",
+        ip_addr=None,
+        port=None,
     ):
         super(GraphClustering).__init__()

         assert device in ["dirac-1", "dirac-3"]

+        assert solver_access in ["cloud", "direct"]
+
         self.graph = graph
         self.num_nodes = graph.number_of_nodes()
         self.num_edges = graph.number_of_edges()

@@ -77,6 +88,9 @@ class GraphClustering(ClusteringBase):
         self.relaxation_schedule = relaxation_schedule
         self.num_samples = num_samples
         self.device = device
+        self.solver_access = solver_access
+        self.ip_addr = ip_addr
+        self.port = port
         self.labels = None

     def get_hamiltonian(self):

@@ -188,7 +202,13 @@ class Clustering(ClusteringBase):
     distance_func: Distance function used; default: squared_l2_norm.

     device: The device used, dirac-1 or dirac-3; default: dirac-3.
-
+
+    solver_access: Solver access type: cloud or direct; default: cloud.
+
+    ip_addr: IP address of the device when direct access is used; default: None.
+
+    port: Port number of the device when direct access is used; default: None.
+
     Examples
     ---------

@@ -222,17 +242,25 @@ class Clustering(ClusteringBase):
         num_samples: int = 1,
         distance_func: str = "squared_l2_norm",
         device: str = "dirac-3",
+        solver_access="cloud",
+        ip_addr=None,
+        port=None,
     ):
         super(Clustering).__init__()

         assert device in ["dirac-1", "dirac-3"]

+        assert solver_access in ["cloud", "direct"]
+
         self.num_clusters = num_clusters
         self.alpha = alpha
         self.relaxation_schedule = relaxation_schedule
         self.num_samples = num_samples
         self.distance_func = distance_func
         self.device = device
+        self.solver_access = solver_access
+        self.ip_addr = ip_addr
+        self.port = port
         self.labels = None

         assert distance_func in ["squared_l2_norm"], (
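
Clustering and GraphClustering now accept the same solver_access / ip_addr / port triple as the classifier models. A hypothetical construction with direct device access, based only on the parameter names shown in this diff; the address, port, and alpha value are placeholders, and fitting would still require a reachable Dirac-3 device:

```python
from eqc_models.ml.clustering import Clustering

# Placeholder address/port for a locally reachable Dirac-3 device.
model = Clustering(
    num_clusters=3,
    alpha=1.0,                      # illustrative value
    relaxation_schedule=2,
    num_samples=1,
    distance_func="squared_l2_norm",
    device="dirac-3",
    solver_access="direct",         # new in 0.14.1; "cloud" remains the default
    ip_addr="192.168.1.10",         # placeholder
    port=50051,                     # placeholder
)
```
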
eqc_models/ml/clusteringbase.py

@@ -7,7 +7,7 @@ from eqc_models.solvers.qciclient import (
     Dirac1CloudSolver,
     Dirac3CloudSolver,
 )
-
+from eqc_models.solvers.eqcdirect import Dirac3DirectSolver

 class ClusteringBase(QuadraticModel):
     """

@@ -19,14 +19,26 @@ class ClusteringBase(QuadraticModel):
         relaxation_schedule=2,
         num_samples=1,
         device="dirac-3",
+        solver_access="cloud",
+        ip_addr=None,
+        port=None,
     ):
         super(self).__init__(None, None, None)

         assert device in ["dirac-1", "dirac-3"]

+        assert solver_access in ["cloud", "direct"]
+
+        if device == "dirac-1" and solver_access == "direct":
+            print("Dirac-1 is only available on cloud")
+            solver_access = "cloud"
+
         self.relaxation_schedule = relaxation_schedule
         self.num_samples = num_samples
         self.device = device
+        self.solver_access = solver_access
+        self.ip_addr = ip_addr
+        self.port = port

     def fit(self, X: np.array):
         pass

@@ -63,7 +75,12 @@ class ClusteringBase(QuadraticModel):
                 num_samples=self.num_samples,
             )
         elif self.device == "dirac-3":
-
+            if self.solver_access == "direct":
+                solver = Dirac3DirectSolver()
+                solver.connect(self.ip_addr, self.port)
+            else:
+                solver = Dirac3CloudSolver()
+
             response = solver.solve(
                 self,
                 sum_constraint=self._sum_constraint,

@@ -71,10 +88,16 @@ class ClusteringBase(QuadraticModel):
                 num_samples=self.num_samples,
             )

-
-
-
-
+            if self.solver_access == "cloud":
+                energies = response["results"]["energies"]
+                solutions = response["results"]["solutions"]
+            elif self.solver_access == "direct":
+                energies = response["energy"]
+                solutions = response["solution"]
+
+            min_id = np.argmin(energies)
+            sol = solutions[min_id]
+
             print(response)

         return sol, response
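
The solve path in ClusteringBase now branches on solver_access twice: once to build the solver and once to read the response, since cloud responses nest energies and solutions under results while direct responses use energy and solution keys. A small sketch of just the response-handling step, using mocked response dictionaries shaped after the keys shown above (the real solver responses may carry additional fields):

```python
import numpy as np

def best_solution(response, solver_access):
    """Pick the lowest-energy solution, mirroring the 0.14.1 ClusteringBase logic."""
    if solver_access == "cloud":
        energies = response["results"]["energies"]
        solutions = response["results"]["solutions"]
    elif solver_access == "direct":
        energies = response["energy"]
        solutions = response["solution"]
    else:
        raise ValueError(f"Unknown solver_access: {solver_access}")
    min_id = np.argmin(energies)
    return solutions[min_id]

cloud_response = {"results": {"energies": [2.0, 1.5], "solutions": [[0, 1], [1, 0]]}}
direct_response = {"energy": [2.0, 1.5], "solution": [[0, 1], [1, 0]]}
print(best_solution(cloud_response, "cloud"))    # [1, 0]
print(best_solution(direct_response, "direct"))  # [1, 0]
```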