eqc-models 0.11.0__py3-none-any.whl → 0.12.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eqc_models-0.12.0.data/platlib/eqc_models/assignment/__init__.py +6 -0
- eqc_models-0.12.0.data/platlib/eqc_models/assignment/resource.py +165 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/base/polyeval.c +4208 -3435
- eqc_models-0.12.0.data/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/base/quadratic.py +2 -2
- eqc_models-0.12.0.data/platlib/eqc_models/base/results.py +166 -0
- eqc_models-0.12.0.data/platlib/eqc_models/graph/__init__.py +9 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/graph/base.py +8 -4
- eqc_models-0.12.0.data/platlib/eqc_models/graph/shortestpath.py +157 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/classifierbase.py +31 -5
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/classifierqboost.py +14 -1
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/classifierqsvm.py +223 -19
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/clustering.py +5 -5
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/clusteringbase.py +1 -1
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/decomposition.py +39 -16
- eqc_models-0.12.0.data/platlib/eqc_models/process/base.py +18 -0
- eqc_models-0.12.0.data/platlib/eqc_models/process/mpc.py +17 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/solvers/__init__.py +1 -5
- eqc_models-0.12.0.data/platlib/eqc_models/solvers/eqcdirect.py +71 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/solvers/qciclient.py +6 -3
- {eqc_models-0.11.0.dist-info → eqc_models-0.12.0.dist-info}/METADATA +2 -3
- eqc_models-0.12.0.dist-info/RECORD +65 -0
- {eqc_models-0.11.0.dist-info → eqc_models-0.12.0.dist-info}/WHEEL +1 -1
- eqc_models-0.11.0.data/platlib/eqc_models/assignment/__init__.py +0 -5
- eqc_models-0.11.0.data/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
- eqc_models-0.11.0.data/platlib/eqc_models/base/results.py +0 -94
- eqc_models-0.11.0.data/platlib/eqc_models/graph/__init__.py +0 -6
- eqc_models-0.11.0.data/platlib/eqc_models/sequence/scheduling.py +0 -29
- eqc_models-0.11.0.dist-info/RECORD +0 -61
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/compile_extensions.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/__init__.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/algorithms/__init__.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/algorithms/base.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/algorithms/penaltymultiplier.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/allocation/__init__.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/allocation/allocation.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/allocation/portbase.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/allocation/portmomentum.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/assignment/qap.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/assignment/setpartition.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/base/__init__.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/base/base.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/base/constraints.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/base/operators.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/base/polyeval.pyx +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/base/polynomial.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/combinatorics/__init__.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/combinatorics/setcover.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/combinatorics/setpartition.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/decoding.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/graph/hypergraph.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/graph/maxcut.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/graph/maxkcut.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/graph/partition.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/__init__.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/cvqboost_hamiltonian.pyx +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/cvqboost_hamiltonian_c_func.c +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/cvqboost_hamiltonian_c_func.h +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/forecast.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/forecastbase.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/regressor.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/regressorbase.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/ml/reservoir.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/sequence/__init__.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/sequence/tsp.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/utilities/__init__.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/utilities/fileio.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/utilities/polynomial.py +0 -0
- {eqc_models-0.11.0.data → eqc_models-0.12.0.data}/platlib/eqc_models/utilities/qplib.py +0 -0
- {eqc_models-0.11.0.dist-info → eqc_models-0.12.0.dist-info}/licenses/LICENSE.txt +0 -0
- {eqc_models-0.11.0.dist-info → eqc_models-0.12.0.dist-info}/top_level.txt +0 -0
--- eqc_models-0.11.0.data/platlib/eqc_models/base/quadratic.py
+++ eqc_models-0.12.0.data/platlib/eqc_models/base/quadratic.py
@@ -245,6 +245,6 @@ class ConstrainedQuadraticModel(ConstraintsMixIn, QuadraticModel):
         return self.lhs, self.rhs
 
     def evaluateObjective(self, solution: np.ndarray) -> float:
-        J = self.quad_objective
-        C = self.linear_objective
+        J = np.array(self.quad_objective)
+        C = np.array(self.linear_objective)
         return np.squeeze(C.T @ solution + solution.T@J@solution)
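The change above wraps the stored coefficients in np.array before evaluation, so the products below also work when the objectives are held as plain lists. A minimal sketch of the quantity evaluateObjective computes, using made-up coefficients (illustrative values only, not taken from the package):

import numpy as np

# Hypothetical 2-variable objective: C holds linear terms, J quadratic terms.
C = np.array([1.0, 2.0])
J = np.array([[0.0, 0.5],
              [0.5, 0.0]])
x = np.array([1.0, 1.0])   # candidate solution

# Same expression as ConstrainedQuadraticModel.evaluateObjective uses.
value = np.squeeze(C.T @ x + x.T @ J @ x)
print(value)  # 1.0 + 2.0 + 1.0 = 4.0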
--- /dev/null
+++ eqc_models-0.12.0.data/platlib/eqc_models/base/results.py
@@ -0,0 +1,166 @@
+import dataclasses
+import warnings
+import numpy as np
+
+@dataclasses.dataclass
+class SolutionResults:
+    """
+    The class is meant to provide a uniform interface to results, no matter
+    the method of running the job. If available, the metrics are reported
+    in nanoseconds.
+
+    Properties
+    ------------
+
+    solutions : np.ndarray
+        2-d array of solution vectors
+
+    energies : np.ndarray
+        1-d array of energies computed from the device for each sample
+
+    counts : np.ndarray
+        1-d array of counts of how often each sample occurred during sampling
+
+    objectives : np.ndarray
+        1-d array of objective values. Is None if the model does not provide
+        a separate objective function
+
+    run_time : np.ndarray
+        1-d array of runtimes reported by the device.
+
+    preprocessing_time : int
+        Single value for time spent preprocessing before sampling occurs.
+
+    postprocessing_time : np.ndarray
+        1-d array of time spent post-processing samples.
+
+    penalties : np.ndarray
+        1-d array of penalty values for each sample. Is None if the model does
+        not have constraints.
+
+    device : str
+        String that represents the device used to solve the model.
+
+    time_units : str
+        String indicator of the unit of time reported in the metrics. Only
+        ns is supported at this time.
+
+    """
+
+    solutions : np.ndarray
+    energies : np.ndarray
+    counts : np.ndarray
+    objectives : np.ndarray
+    run_time : np.ndarray
+    preprocessing_time : int
+    postprocessing_time : np.ndarray
+    penalties : np.ndarray = None
+    device : str = None
+    time_units : str = "ns"
+
+    @property
+    def device_time(self) -> np.ndarray:
+        """
+        1-d array of device usage computed from preprocessing, runtime
+        and postprocessing time.
+
+        """
+        if self.run_time:
+            pre = self.preprocessing_time
+            runtime = np.sum(self.run_time)
+            post = np.sum(self.postprocessing_time)
+            return pre + runtime + post
+        else:
+            return None
+
+    @property
+    def total_samples(self):
+        return np.sum(self.counts)
+
+    @property
+    def best_energy(self):
+        return np.min(self.energies)
+
+    @classmethod
+    def determine_device_type(cls, device_config):
+        """
+        Use the device config object from a cloud response
+        to get the device info. It will have device and job type
+        identifiers in it.
+
+        """
+        devices = [k for k in device_config.keys()]
+        # only one device type is supported at a time
+        return devices[0]
+
+    @classmethod
+    def from_cloud_response(cls, model, response, solver):
+        """ Fill in the details from the cloud """
+
+        solutions = np.array(response["results"]["solutions"])
+        if model.machine_slacks > 0:
+            solutions = solutions[:,:-model.machine_slacks]
+        energies = np.array(response["results"]["energies"])
+        # interrogate to determine the device type
+        try:
+            device_type = cls.determine_device_type(response["job_info"]["job_submission"]["device_config"])
+        except KeyError:
+            print(response.keys())
+            raise
+        if "dirac-1" in device_type:
+            # decode the qubo
+            new_solutions = []
+            for solution in solutions:
+                solution = np.array(solution)
+                # build an operator to map the bit vector to scalar
+                base_count = np.floor(np.log2(model.upper_bound))+1
+                assert np.sum(base_count) == solution.shape[0], "Incorrect solution-upper bound match"
+                m = model.upper_bound.shape[0]
+                n = solution.shape[0]
+                D = np.zeros((m, n), dtype=np.int32)
+                j = 0
+                for i in range(m):
+                    k = int(base_count[i])
+                    D[i, j:j+k] = 2**np.arange(k)
+                    j += k
+                solution = D@solution
+                new_solutions.append(solution)
+            solutions = np.array(new_solutions)
+        if hasattr(model, "evaluateObjective"):
+            objectives = np.zeros((solutions.shape[0],), dtype=np.float32)
+            for i in range(solutions.shape[0]):
+                try:
+                    objective = model.evaluateObjective(solutions[i])
+                except NotImplementedError:
+                    warnings.warn(f"Cannot set objective value in results for {model.__class__}")
+                    objectives = None
+                    break
+                objectives[i] = objective
+        else:
+            objectives = None
+        if hasattr(model, "evaluatePenalties"):
+            penalties = np.zeros((solutions.shape[0],), dtype=np.float32)
+            for i in range(solutions.shape[0]):
+                penalties[i] = model.evaluatePenalties(solutions[i]) + model.offset
+        else:
+            penalties = None
+        counts = np.array(response["results"]["counts"])
+        job_id = response["job_info"]["job_id"]
+        try:
+            metrics = solver.client.get_job_metrics(job_id=job_id)
+            metrics = metrics["job_metrics"]
+            time_ns = metrics["time_ns"]
+            device = time_ns["device"][device_type]
+            runtime = device["samples"]["runtime"]
+            post = device["samples"].get("postprocessing_time", [0 for t in runtime])
+            pre = device["samples"].get("preprocessing_time", 0)
+        except KeyError:
+            time_ns = []
+            runtime = []
+            post = []
+            pre = None
+        results = SolutionResults(solutions, energies, counts, objectives,
+                                  runtime, pre, post, penalties=penalties,
+                                  device=device_type, time_units="ns")
+
+        return results
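In from_cloud_response above, dirac-1 samples arrive as bit vectors, and the loop builds a matrix D whose rows turn each variable's block of bits back into an integer using powers of two sized by that variable's upper bound. A standalone sketch of just that decoding step, with made-up bounds and a made-up bit vector:

import numpy as np

# Hypothetical upper bounds for three integer variables; values are illustrative.
upper_bound = np.array([5, 2, 9])
bits_per_var = (np.floor(np.log2(upper_bound)) + 1).astype(int)  # [3, 2, 4]

# A made-up bit vector of length sum(bits_per_var) = 9.
bit_solution = np.array([1, 0, 1,  0, 1,  1, 0, 0, 1])

m, n = upper_bound.shape[0], bit_solution.shape[0]
D = np.zeros((m, n), dtype=np.int32)
j = 0
for i in range(m):
    k = bits_per_var[i]
    D[i, j:j + k] = 2 ** np.arange(k)  # weights 1, 2, 4, ...
    j += k

print(D @ bit_solution)  # integer value per variable: [5 2 9]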
--- /dev/null
+++ eqc_models-0.12.0.data/platlib/eqc_models/graph/__init__.py
@@ -0,0 +1,9 @@
+# (C) Quantum Computing Inc., 2024.
+
+from .base import EdgeMixin, EdgeModel, GraphModel, NodeModel
+from .maxcut import MaxCutModel
+from .partition import GraphPartitionModel
+
+__all__ = ["MaxCutModel", "GraphPartitionModel",
+           "EdgeMixin", "EdgeModel", "GraphModel",
+           "NodeModel"]
--- eqc_models-0.11.0.data/platlib/eqc_models/graph/base.py
+++ eqc_models-0.12.0.data/platlib/eqc_models/graph/base.py
@@ -7,7 +7,7 @@ class GraphModel(QuadraticModel):
     """ """
     def __init__(self, G : nx.Graph):
        self.G = G
-        super().__init__(*self.costFunction())
+        super(GraphModel, self).__init__(*self.costFunction())
 
     @property
     def linear_objective(self):
@@ -63,12 +63,16 @@ class TwoPartitionModel(NodeModel):
 
     """
 
-class
-    """ Create a model where the variables are edge-based """
+class EdgeMixin:
 
     @property
     def variables(self) -> List[str]:
        """ Provide a variable name to index lookup; order enforced by sorting the list before returning """
-        names = [
+        names = [(u, v) for u, v in self.G.edges]
        names.sort()
        return names
+
+class EdgeModel(EdgeMixin, GraphModel):
+    """ Create a model where the variables are edge-based """
+
+
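The new EdgeMixin contributes a variables property that lists the graph's edges as sorted (u, v) tuples; EdgeModel and the ShortestPathModel in a later hunk reuse it. A small sketch of what that property yields on a throwaway graph (labels are illustrative):

import networkx as nx

# Same expression as EdgeMixin.variables, applied to a toy graph.
G = nx.DiGraph()
G.add_edge("b", "c")
G.add_edge("a", "b")

names = [(u, v) for u, v in G.edges]
names.sort()
print(names)  # [('a', 'b'), ('b', 'c')]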
--- /dev/null
+++ eqc_models-0.12.0.data/platlib/eqc_models/graph/shortestpath.py
@@ -0,0 +1,157 @@
+r"""
+MIP Shortest Path implementation
+
+Given a graph $G$ and nodes $s$ and $t$, find the shortest path
+by edge weight between $s$ and $t$.
+
+$$
+\min \sum_{(i,j)\in E} w_{ij} x_{ij}
+$$
+subject to
+$$
+\sum_{(u,v)\in E} x_{u,v} - \sum_{(v,u)\in E} x_{v,u} = 0 \quad \forall u \in N \setminus \{s, t\}
+$$
+and
+$$
+\sum_{(s,v)\in E} x_{s,v} = 1
+$$
+and
+$$
+\sum_{(u,t)\in E} x_{u,t} = 1
+$$
+
+"""
+
+from typing import Any, Dict, Tuple
+import logging
+import numpy as np
+import networkx as nx
+from eqc_models.graph import EdgeMixin
+from eqc_models.base.quadratic import ConstrainedQuadraticModel
+
+log = logging.getLogger(name=__name__)
+
+class ShortestPathModel(EdgeMixin, ConstrainedQuadraticModel):
+    """
+    ShortestPathModel describes the MIP formulation for the
+    shortest path problem.
+
+    Parameters
+    -------------
+
+    G : nx.DiGraph
+        A directed graph which is assumed to be connected. A graph
+        with disconnected subgraphs may reveal a solution if $s$ and $t$
+        are in the same subgraph, but testing for the existence of a path
+        between s and t using this model is not recommended. This is
+        due to the difficulty posed by selecting a penalty multiplier
+        large enough to enforce the penalties; no such multiplier exists
+        in the infeasible case.
+    s : Any
+        This is the label for the start node.
+    t : Any
+        This is the label for the end node.
+
+
+    """
+
+    def __init__(self, G: nx.DiGraph, s : Any, t : Any):
+        self.G = G
+        self.s = s
+        self.t = t
+        self.lhs, self.rhs = self.buildConstraints()
+        C, J = self.buildObjective()
+        super(ShortestPathModel, self).__init__(C, J, self.lhs, self.rhs)
+
+    def buildConstraints(self) -> Tuple[np.ndarray,np.ndarray]:
+        """
+        Constraints:
+        $$
+        sum_j x_{l,j} - sum_i x_{i,l} = c for all nodes l
+        $$
+        $c$ is -1, 1 or 0 for $l=t$, $l=s$ or all others
+
+        """
+        log.info("Building constraints to find path from %s to %s", self.s, self.t)
+        variables = self.variables
+        nodes = [n for n in self.G.nodes]
+        m = len(nodes)
+        n = len(variables)
+        _cons = np.zeros((m, n), dtype=np.int8)
+        _rhs = np.zeros((m, 1), dtype=np.int8)
+        for node_index, k in enumerate(nodes):
+            if k == self.s:
+                _rhs[node_index, 0] = 1
+            elif k == self.t:
+                _rhs[node_index, 0] = -1
+        for l, (i, j) in enumerate(variables):
+            if i == j:
+                # self loops are not allowed
+                raise ValueError("Self loops are not allowed in ShortestPathModel")
+            # ignore these edges because we can't go back to s or leave t
+            elif j == self.s:
+                continue
+            elif i == self.t:
+                continue
+            i_index = nodes.index(i)
+            j_index = nodes.index(j)
+            _cons[i_index, l] = 1
+            _cons[j_index, l] = -1
+        log.info("LHS shape %s RHS shape %s", _cons.shape, _rhs.shape)
+        log.info("checksum %f min %f", np.sum(_cons), np.min(_cons))
+        assert np.sum(_rhs) == 0
+        return _cons, np.squeeze(_rhs)
+
+    def buildObjective(self) -> Tuple[np.ndarray, np.ndarray]:
+        r"""
+        Objective:
+        $\min \sum_{ij} w_{ij} x_{ij}$
+
+        """
+        variables = self.variables
+        G = self.G
+        nodes = G.nodes
+        m, n = len(nodes), len(variables)
+        _obj = [0 for i in range(n)]
+        for index, name in enumerate(variables):
+            i, j = name
+            _obj[index] = v = G.get_edge_data(i, j)["weight"]
+            assert not np.isnan(v), f"Got a NaN at {i, j}"
+        J = np.zeros((n, n))
+        return np.array(_obj), J
+
+    def decode(self, solution : np.ndarray) -> Dict:
+        """
+        Convert a solution to this model into a path, which is
+        a dictionary with each edge described by key, value pairs.
+
+        """
+        variables = self.variables
+
+        lhs, rhs = self.constraints
+        upper_thresh = max(solution)
+        lower_thresh = 0
+        while upper_thresh - lower_thresh > 1e-6:
+            log.info("Lower Value: %f Upper Value %f", lower_thresh, upper_thresh)
+            thresh = (lower_thresh + upper_thresh) / 2
+            nx_path = None
+            G = nx.DiGraph()
+            for (i, j), value in zip(variables, solution):
+                if value > thresh:
+                    G.add_edge(i, j)
+            path = {}
+            try:
+                nx_path = nx.shortest_path(G, self.s, self.t)
+                upper_thresh = thresh
+                lower_thresh = thresh
+            except (nx.exception.NodeNotFound, nx.NetworkXAlgorithmError) as err:
+                lower_thresh = thresh
+        if nx_path is None:
+            raise RuntimeError(f"Solution does not describe path from {self.s} to {self.t}")
+        path = {}
+        for i, v in enumerate(nx_path):
+            path[nx_path[i-1]] = v
+        if self.t in path:
+            del path[self.t]
+        return path
+
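For orientation, a sketch of how the new ShortestPathModel might be used once 0.12.0 is installed. The constructor, the variables property, and decode come from the hunk above; the graph, weights, and the hand-made solution vector are invented for the example, and whether the constructor runs exactly as shown depends on ConstrainedQuadraticModel internals not visible in this diff:

import networkx as nx
import numpy as np
from eqc_models.graph.shortestpath import ShortestPathModel

# Toy weighted digraph; labels and weights are illustrative only.
G = nx.DiGraph()
G.add_edge("s", "a", weight=1.0)
G.add_edge("a", "t", weight=2.0)
G.add_edge("s", "t", weight=5.0)

model = ShortestPathModel(G, "s", "t")
print(model.variables)   # sorted edge tuples: [('a', 't'), ('s', 'a'), ('s', 't')]

# decode() maps a per-edge solution vector to a {node: next_node} path.
solution = np.array([1.0, 1.0, 0.0])   # hand-made: select a->t and s->a
print(model.decode(solution))          # expected {'s': 'a', 'a': 't'}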
--- eqc_models-0.11.0.data/platlib/eqc_models/ml/classifierbase.py
+++ eqc_models-0.12.0.data/platlib/eqc_models/ml/classifierbase.py
@@ -11,6 +11,7 @@ import numpy as np
 
 from eqc_models import QuadraticModel
 from eqc_models.solvers.qciclient import Dirac3CloudSolver
+from eqc_models.solvers.eqcdirect import Dirac3DirectSolver
 
 
 class ClassifierBase(QuadraticModel):
@@ -18,12 +19,25 @@ class ClassifierBase(QuadraticModel):
         self,
         relaxation_schedule=2,
         num_samples=1,
+        solver_access="cloud",
+        ip_addr=None,
+        port=None,
     ):
 
         super(self).__init__(None, None, None)
 
         self.relaxation_schedule = relaxation_schedule
         self.num_samples = num_samples
+
+        assert solver_access in ["cloud", "direct"]
+
+        if solver_access == "direct":
+            assert ip_addr is not None, "ip_addr should be set when using direct solver!"
+            assert port is not None, "port should be set when using direct solver!"
+
+        self.solver_access = solver_access
+        self.ip_addr = ip_addr
+        self.port = port
         self.params = None
         self.X_train = None
         self.y_train = None
@@ -53,18 +67,30 @@ class ClassifierBase(QuadraticModel):
         return
 
     def solve(self):
-
+
+        if self.solver_access == "direct":
+            solver = Dirac3DirectSolver()
+            solver.connect(self.ip_addr, self.port)
+        else:
+            solver = Dirac3CloudSolver()
+
         response = solver.solve(
             self,
             sum_constraint=self._sum_constraint,
             relaxation_schedule=self.relaxation_schedule,
-            solution_precision=
+            solution_precision=None,
             num_samples=self.num_samples,
         )
 
-
-
-
+        if self.solver_access == "cloud":
+            energies = response["results"]["energies"]
+            solutions = response["results"]["solutions"]
+        elif self.solver_access == "direct":
+            energies = response["energy"]
+            solutions = response["solution"]
+
+        min_id = np.argmin(energies)
+        sol = solutions[min_id]
 
         print(response)
 
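The updated solve() reads energies and solutions from two different response layouts depending on the access mode. A self-contained sketch of just that selection logic; the dictionaries are invented stand-ins, not real device responses:

import numpy as np

# Illustrative stand-ins for the two response layouts handled above.
cloud_response = {"results": {"energies": [3.0, 1.5], "solutions": [[0, 1], [1, 0]]}}
direct_response = {"energy": [3.0, 1.5], "solution": [[0, 1], [1, 0]]}

def best_sample(response, solver_access):
    # Mirrors the selection logic added to ClassifierBase.solve().
    if solver_access == "cloud":
        energies = response["results"]["energies"]
        solutions = response["results"]["solutions"]
    else:
        energies = response["energy"]
        solutions = response["solution"]
    min_id = np.argmin(energies)
    return solutions[min_id]

print(best_sample(cloud_response, "cloud"))    # [1, 0]
print(best_sample(direct_response, "direct"))  # [1, 0]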
--- eqc_models-0.11.0.data/platlib/eqc_models/ml/classifierqboost.py
+++ eqc_models-0.12.0.data/platlib/eqc_models/ml/classifierqboost.py
@@ -94,6 +94,12 @@ class QBoostClassifier(ClassifierBase):
 
     num_samples: Number of samples used by Dirac-3; default: 1.
 
+    solver_access: Solver access type: cloud or direct; default: cloud.
+
+    ip_addr: IP address of the device when direct access is used; default: None.
+
+    port: Port number of the device when direct access is used; default: None.
+
     lambda_coef: A penalty multiplier; default: 0.
 
     weak_cls_schedule: Weak classifier schedule. Is either 1, 2,
@@ -155,6 +161,9 @@ class QBoostClassifier(ClassifierBase):
         self,
         relaxation_schedule=2,
         num_samples=1,
+        solver_access="cloud",
+        ip_addr=None,
+        port=None,
         lambda_coef=0,
         weak_cls_schedule=2,
         weak_cls_type="lg",
@@ -172,9 +181,13 @@ class QBoostClassifier(ClassifierBase):
             "multi_processing_shm",
             "sequential",
         ]
-
+        assert solver_access in ["cloud", "direct"]
+
         self.relaxation_schedule = relaxation_schedule
         self.num_samples = num_samples
+        self.solver_access = solver_access
+        self.ip_addr = ip_addr
+        self.port = port
         self.lambda_coef = lambda_coef
         self.weak_cls_schedule = weak_cls_schedule
         self.weak_cls_type = weak_cls_type