eqc-models 0.10.3__tar.gz → 0.11.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {eqc_models-0.10.3/eqc_models.egg-info → eqc_models-0.11.1}/PKG-INFO +1 -1
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/source/conf.py +1 -1
- eqc_models-0.11.1/eqc_models/assignment/__init__.py +6 -0
- eqc_models-0.11.1/eqc_models/assignment/resource.py +165 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/base/__init__.py +3 -1
- eqc_models-0.11.1/eqc_models/base/results.py +166 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/classifierqsvm.py +197 -21
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/decomposition.py +39 -16
- eqc_models-0.11.1/eqc_models/process/base.py +13 -0
- eqc_models-0.11.1/eqc_models/process/mpc.py +17 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/solvers/qciclient.py +6 -3
- {eqc_models-0.10.3 → eqc_models-0.11.1/eqc_models.egg-info}/PKG-INFO +1 -1
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models.egg-info/SOURCES.txt +7 -1
- eqc_models-0.11.1/scripts/crew_assignment_example.py +51 -0
- eqc_models-0.11.1/scripts/qsvm_dual_iris_dirac3.py +101 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/qsvm_iris_dirac3.py +2 -5
- eqc_models-0.11.1/scripts/results_example.py +31 -0
- eqc_models-0.10.3/eqc_models/assignment/__init__.py +0 -5
- eqc_models-0.10.3/scripts/duality_example.py +0 -73
- {eqc_models-0.10.3 → eqc_models-0.11.1}/.gitlab-ci.yml +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/LICENSE.txt +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/MANIFEST.in +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/README.md +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/compile_extensions.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/Makefile +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/build/html/_static/basic.css +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/build/html/_static/css/badge_only.css +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/build/html/_static/css/theme.css +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/build/html/_static/custom.css +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/build/html/_static/file.png +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/build/html/_static/minus.png +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/build/html/_static/plus.png +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/build/html/_static/pygments.css +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/build/html/_static/white_logo.png +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/make.bat +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/source/_static/custom.css +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/source/_static/white_logo.png +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/source/dependencies.rst +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/source/eqc_models.rst +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/source/index.rst +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/source/modules.rst +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/docs/source/usage.rst +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/__init__.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/algorithms/__init__.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/algorithms/base.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/algorithms/penaltymultiplier.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/allocation/__init__.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/allocation/allocation.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/allocation/portbase.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/allocation/portmomentum.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/assignment/qap.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/assignment/setpartition.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/base/base.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/base/constraints.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/base/operators.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/base/polyeval.c +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/base/polyeval.pyx +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/base/polynomial.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/base/quadratic.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/combinatorics/__init__.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/combinatorics/setcover.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/combinatorics/setpartition.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/decoding.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/graph/__init__.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/graph/base.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/graph/hypergraph.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/graph/maxcut.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/graph/maxkcut.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/graph/partition.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/__init__.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/classifierbase.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/classifierqboost.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/clustering.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/clusteringbase.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/cvqboost_hamiltonian.pyx +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/cvqboost_hamiltonian_c_func.c +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/cvqboost_hamiltonian_c_func.h +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/forecast.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/forecastbase.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/regressor.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/regressorbase.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/ml/reservoir.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/sequence/__init__.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/sequence/tsp.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/solvers/__init__.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/utilities/__init__.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/utilities/fileio.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/utilities/polynomial.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models/utilities/qplib.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models.egg-info/dependency_links.txt +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models.egg-info/requires.txt +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/eqc_models.egg-info/top_level.txt +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/pyproject.toml +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/binary_job_example.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/c6h6_graph_clustering.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/clustering.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/continuous_job_example.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/graph_clustering.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/graph_partitioning.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/hamiltonian_to_polynomial.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/hypergraph.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/integer_job_example.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/karate_graph_clustering.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/lin_reg_dirac3.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/mackey_glass_cell_production_series.csv +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/pca_iris_dirac3.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/port_opt_dirac3.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/qboost_iris_dirac3.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/qplib_benchmark_config.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/qplib_reader.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/qplib_runner.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/reservoir_forecast.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/rundoctests.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/scripts/utils.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/setup.cfg +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/doctest_base.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testallocationmodel.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testconstraint.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testcvqboost.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testeqcdirectsolver.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testgraphpartitionmodel.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testhypergraphmodel.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testmaxcutmodel.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testpolynomialmodel.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testqapmodel.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testqciclientsolver.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testquadraticmodel.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testsetcovermodel.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testsetpartitionmodel.py +0 -0
- {eqc_models-0.10.3 → eqc_models-0.11.1}/test/testtsp.py +0 -0
|
@@ -9,7 +9,7 @@
|
|
|
9
9
|
|
|
10
10
|
project = 'eqc-models'
|
|
11
11
|
# sphinx adds a period after copyright
|
|
12
|
-
copyright = '
|
|
12
|
+
copyright = '2025 Quantum Computing Inc. All rights reserved'
|
|
13
13
|
author = 'Quantum Computing Inc.'
|
|
14
14
|
|
|
15
15
|
import importlib.metadata as metadata
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
from typing import (Dict, List)
|
|
2
|
+
import numpy as np
|
|
3
|
+
from eqc_models.base.quadratic import ConstrainedQuadraticModel
|
|
4
|
+
from eqc_models.base.constraints import InequalitiesMixin
|
|
5
|
+
|
|
6
|
+
class ResourceAssignmentModel(InequalitiesMixin, ConstrainedQuadraticModel):
    """
    Resource assignment model: choose which resource performs which task so
    that total assignment cost is minimized, subject to resource capacity
    (inequality) and task coverage (equality) constraints.

    Parameters
    ------------

    resources : List
        Dicts; each must provide "skills", "capacity" and "cost" keys
        ("name" is optional).
    tasks : List
        Dicts; each must provide "skill_need" and "load" keys
        ("name" is optional).

    >>> # name is not a required attribute of the resources or tasks
    >>> crews = [{"name": "Maintenance Crew 1", "skills": ["A", "F"], "capacity": 5, "cost": 4},
    ...          {"name": "Baggage Crew 1", "skills": ["B"], "capacity": 4, "cost": 1},
    ...          {"name": "Maintenance Crew 2", "skills": ["A", "F"], "capacity": 5, "cost": 2}]
    >>> tasks = [{"name": "Refuel", "skill_need": "F", "load": 3},
    ...          {"name": "Baggage", "skill_need": "B", "load": 1}]
    >>> model = ResourceAssignmentModel(crews, tasks)
    >>> assignments = model.createAssignmentVars()
    >>> assignments
    [{'resource': 0, 'task': 0}, {'resource': 1, 'task': 1}, {'resource': 2, 'task': 0}]
    >>> A, b, senses = model.constrainAssignments(assignments)
    >>> A
    array([[3., 0., 0.],
           [0., 1., 0.],
           [0., 0., 3.],
           [3., 0., 3.],
           [0., 3., 0.]], dtype=float32)
    >>> b
    array([5., 4., 5., 3., 3.], dtype=float32)
    >>> senses
    ['LE', 'LE', 'LE', 'EQ', 'EQ']
    >>> A, b = model.constraints
    >>> A
    array([[3., 0., 0., 1., 0., 0.],
           [0., 1., 0., 0., 1., 0.],
           [0., 0., 3., 0., 0., 1.],
           [3., 0., 3., 0., 0., 0.],
           [0., 3., 0., 0., 0., 0.]])

    """

    def __init__(self, resources, tasks):
        self.resources = resources
        self.checkTasks(tasks)
        self.tasks = tasks
        self.assignments = assignments = self.createAssignmentVars()
        # one binary variable per feasible assignment plus one slack
        # variable per resource (for the capacity inequalities)
        n = len(assignments) + len(resources)
        self.variables = [f"a{i}" for i in range(len(assignments))]
        self.upper_bound = np.ones((n,))
        self.upper_bound[-len(resources):] = [resource["capacity"] for resource in resources]
        A, b, senses = self.constrainAssignments(assignments)
        J = np.zeros((n, n))
        C = np.zeros((n,), dtype=np.float32)
        # objective is to minimize cost of assignments
        for j, assignment in enumerate(assignments):
            C[j] = resources[assignment["resource"]]["cost"] * tasks[assignment["task"]]["load"]
        super(ResourceAssignmentModel, self).__init__(C, J, A, b)
        self.senses = senses
        # always use a machine slack
        self.machine_slacks = 1

    @classmethod
    def checkTasks(cls, tasks):
        """Validate that every task dict has the required keys.

        Raises
        ------
        ValueError
            If any task is missing "skill_need" or "load".
        """
        for task in tasks:
            if "skill_need" not in task:
                raise ValueError("All tasks must have the skill_need attribute")
            if "load" not in task:
                raise ValueError("All tasks must have the load attribute")

    def createAssignmentVars(self):
        """ Examine all combinations of possible crew-task assignments """

        assign_vars = []
        resources = self.resources
        tasks = self.tasks
        for i, resource in enumerate(resources):
            skills = resource["skills"]
            for j, task in enumerate(tasks):
                # only create a variable when the resource can do the task
                if task["skill_need"] in skills:
                    assign_vars.append({"resource": i, "task": j})
        return assign_vars

    def constrainAssignments(self, assignments : List) -> tuple:
        """
        Examine the assignments to determine the necessary constraints to
        ensure feasibility of solution.

        Returns
        -------
        (A, b, senses) : tuple
            Constraint matrix, right-hand side and a list of "LE"/"EQ"
            sense strings, one per constraint row.
        """
        # A is sized using the number of crews and the number of assignment variables plus slacks
        m1 = len(self.resources)
        m2 = len(self.tasks)
        n1 = len(assignments)
        m = m1 + m2
        n = n1
        A = np.zeros((m, n), dtype=np.float32)
        b = np.zeros((m,), dtype=np.float32)
        # capacity rows: sum of assigned loads must not exceed capacity (LE)
        for i, resource in enumerate(self.resources):
            b[i] = resource["capacity"]
            for k, assignment in enumerate(assignments):
                if assignment["resource"] == i:
                    A[i, k] = self.tasks[assignment["task"]]["load"]
        # coverage rows: each task assigned exactly once (EQ), scaled by
        # the largest load so the rows are comparable in magnitude
        assignment_coeff = np.max(A)
        for i, task in enumerate(self.tasks):
            b[m1+i] = assignment_coeff
            for k, assignment in enumerate(assignments):
                if assignment["task"] == i:
                    A[m1+i, k] = assignment_coeff
        senses = ["LE" for resource in self.resources] + ["EQ" for task in self.tasks]
        return A, b, senses

    @property
    def sum_constraint(self) -> int:
        """ This value is a suggestion which should be used with a machine slack """

        sc = 0
        sc += sum([resource["capacity"] for resource in self.resources])
        sc += len(self.tasks)
        return sc

    def decode(self, solution : np.ndarray) -> List[Dict]:
        """
        Convert the binary solution into a list of tasks

        Returns a list, one entry per resource, of the task dicts
        assigned to that resource.
        """

        # ensure solution is array
        solution = np.array(solution)
        resource_assignments = [[] for resource in self.resources]
        # check if there are fractional values less than 1
        if solution[~np.logical_or(solution==0, solution>=1)].size>0:
            # iterate over the values and assign tasks by largest value for tasks
            # not assigned already
            remaining_tasks = list(range(len(self.tasks)))
            fltr = self.upper_bound==1
            while len(remaining_tasks) > 0 and solution[fltr].shape[0]>0:
                largest = np.max(solution[fltr])
                indices, = np.where(np.logical_and(fltr, solution == largest))
                for idx in indices:
                    assignment = self.assignments[idx]
                    if assignment["task"] in remaining_tasks:
                        task = self.tasks[assignment["task"]]
                        resource_assignments[assignment["resource"]].append(task)
                        del remaining_tasks[remaining_tasks.index(assignment["task"])]
                        break
                # exclude the values just considered and continue downward
                fltr = np.logical_and(fltr, solution < largest)
        else:
            # Use the restriction that a task cannot be assigned more than once
            for j, task in enumerate(self.tasks):
                highest = 0
                best_resource = None
                for a, assignment in zip(solution, self.assignments):
                    if assignment["task"] == j:
                        if a > highest:
                            highest = a
                            best_resource = assignment["resource"]
                assert best_resource is not None, f"solution had no positive assignment values for {task}"
                resource_assignments[best_resource].append(task)

        return resource_assignments
|
|
@@ -65,8 +65,10 @@ from .quadratic import (ConstrainedQuadraticModel, QuadraticModel)
|
|
|
65
65
|
from .polynomial import (PolynomialModel, ConstrainedPolynomialModel)
|
|
66
66
|
from .base import (ModelSolver, EqcModel)
|
|
67
67
|
from .operators import (QUBO, Polynomial)
|
|
68
|
+
from .results import SolutionResults
|
|
68
69
|
|
|
69
70
|
__all__ = ["ConstraintsMixIn", "ConstraintModel", "ConstrainedQuadraticModel",
|
|
70
71
|
"QuadraticModel", "PolynomialModel", "ConstrainedPolynomialModel",
|
|
71
72
|
"InequalitiesMixin", "InequalityConstraintModel",
|
|
72
|
-
"EqcModel", "ModelSolver", "QUBO", "Polynomial"
|
|
73
|
+
"EqcModel", "ModelSolver", "QUBO", "Polynomial",
|
|
74
|
+
"SolutionResults"]
|
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
import dataclasses
|
|
2
|
+
import warnings
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
@dataclasses.dataclass
class SolutionResults:
    """
    The class is meant to provide a uniform interface to results, no matter
    the method of running the job. If available, the metrics are reported
    in nanoseconds.

    Properties
    ------------

    solutions : np.ndarray
        2-d array of solution vectors

    energies : np.ndarray
        1-d array of energies computed from the device for each sample

    counts : np.ndarray
        1-d array of counts the particular sample occurred during sampling

    objectives : np.ndarray
        1-d array of objective values. Is None if the model does not provide
        a separate objective function

    run_time : np.ndarray
        1-d array of runtimes reported by the device.

    preprocessing_time : int
        Single value for time spent preprocessing before sampling occurs.

    postprocessing_time : np.ndarray
        1-d array of time spent post-processing samples.

    penalties : np.ndarray
        1-d array of penalty values for each sample. Is None if the model does
        not have constraints.

    device : str
        String that represents the device used to solve the model.

    time_units : str
        String indicator of the unit of time reported in the metrics. Only
        ns is supported at this time.

    """

    solutions : np.ndarray
    energies : np.ndarray
    counts : np.ndarray
    objectives : np.ndarray
    run_time : np.ndarray
    preprocessing_time : int
    postprocessing_time : np.ndarray
    penalties : np.ndarray = None
    device : str = None
    time_units : str = "ns"

    @property
    def device_time(self):
        """
        Total device usage computed from preprocessing, runtime
        and postprocessing time. Returns None when no runtime data is
        available.

        """
        # NOTE: `if self.run_time:` raised ValueError for multi-element
        # ndarrays (ambiguous truth value); test emptiness explicitly.
        if self.run_time is not None and len(self.run_time) > 0:
            pre = self.preprocessing_time
            runtime = np.sum(self.run_time)
            post = np.sum(self.postprocessing_time)
            return pre + runtime + post
        else:
            return None

    @property
    def total_samples(self):
        """Total number of samples taken, summed over all distinct solutions."""
        return np.sum(self.counts)

    @property
    def best_energy(self):
        """Lowest energy observed across all samples."""
        return np.min(self.energies)

    @classmethod
    def determine_device_type(cls, device_config):
        """
        Use the device config object from a cloud response
        to get the device info. It will have a device and job type
        identifiers in it.

        """
        devices = [k for k in device_config.keys()]
        # only one device type is supported at a time
        return devices[0]

    @classmethod
    def from_cloud_response(cls, model, response, solver):
        """ Fill in the details from the cloud

        Builds a SolutionResults instance from a cloud job ``response``
        dict (assumes "results"/"job_info" keys — QCi cloud schema),
        the ``model`` that was solved and the ``solver`` used (its
        client is queried for job metrics).
        """

        solutions = np.array(response["results"]["solutions"])
        if model.machine_slacks > 0:
            # drop the machine slack variables from each solution vector
            solutions = solutions[:,:-model.machine_slacks]
        energies = np.array(response["results"]["energies"])
        # interrogate to determine the device type
        try:
            device_type = cls.determine_device_type(response["job_info"]["job_submission"]["device_config"])
        except KeyError:
            # debug aid: show which keys were present before re-raising
            print(response.keys())
            raise
        if "dirac-1" in device_type:
            # decode the qubo: dirac-1 solutions are bit vectors encoding
            # integers; map each back to its integer variable values
            new_solutions = []
            for solution in solutions:
                solution = np.array(solution)
                # build an operator to map the bit vector to scalar
                base_count = np.floor(np.log2(model.upper_bound))+1
                assert np.sum(base_count) == solution.shape[0], "Incorrect solution-upper bound match"
                m = model.upper_bound.shape[0]
                n = solution.shape[0]
                D = np.zeros((m, n), dtype=np.int32)
                j = 0
                for i in range(m):
                    k = int(base_count[i])
                    # powers of two for this variable's bit group
                    D[i, j:j+k] = 2**np.arange(k)
                    j += k
                solution = D@solution
                new_solutions.append(solution)
            solutions = np.array(new_solutions)
        if hasattr(model, "evaluateObjective"):
            objectives = np.zeros((solutions.shape[0],), dtype=np.float32)
            for i in range(solutions.shape[0]):
                try:
                    objective = model.evaluateObjective(solutions[i])
                except NotImplementedError:
                    # model advertises the method but does not implement it
                    warnings.warn(f"Cannot set objective value in results for {model.__class__}")
                    objectives = None
                    break
                objectives[i] = objective
        else:
            objectives = None
        if hasattr(model, "evaluatePenalties"):
            penalties = np.zeros((solutions.shape[0],), dtype=np.float32)
            for i in range(solutions.shape[0]):
                penalties[i] = model.evaluatePenalties(solutions[i]) + model.offset
        else:
            penalties = None
        counts = np.array(response["results"]["counts"])
        job_id = response["job_info"]["job_id"]
        try:
            metrics = solver.client.get_job_metrics(job_id=job_id)
            metrics = metrics["job_metrics"]
            time_ns = metrics["time_ns"]
            device = time_ns["device"][device_type]
            runtime = device["samples"]["runtime"]
            post = device["samples"].get("postprocessing_time", [0 for t in runtime])
            pre = device["samples"].get("preprocessing_time", 0)
        except KeyError:
            # metrics unavailable: leave timing fields empty
            time_ns = []
            runtime = []
            post = []
            pre = None
        results = SolutionResults(solutions, energies, counts, objectives,
                                  runtime, pre, post, penalties=penalties,
                                  device=device_type, time_units="ns")

        return results
|
|
@@ -8,28 +8,204 @@ import json
|
|
|
8
8
|
import warnings
|
|
9
9
|
from functools import wraps
|
|
10
10
|
import numpy as np
|
|
11
|
+
from sklearn.preprocessing import MinMaxScaler
|
|
11
12
|
|
|
12
13
|
from eqc_models.ml.classifierbase import ClassifierBase
|
|
13
14
|
|
|
14
15
|
|
|
15
16
|
class QSVMClassifier(ClassifierBase):
    """An implementation of QSVM classifier that uses QCi's Dirac-3.

    Parameters
    ----------

    relaxation_schedule: Relaxation schedule used by Dirac-3; default:
    1.

    num_samples: Number of samples used by Dirac-3; default: 1.

    lambda_coef: The penalty multiplier; default: 1.0.

    Examples
    -----------

    >>> from sklearn import datasets
    >>> from sklearn.preprocessing import MinMaxScaler
    >>> from sklearn.model_selection import train_test_split
    >>> iris = datasets.load_iris()
    >>> X = iris.data
    >>> y = iris.target
    >>> scaler = MinMaxScaler()
    >>> X = scaler.fit_transform(X)
    >>> for i in range(len(y)):
    ...     if y[i] == 0:
    ...         y[i] = -1
    ...     elif y[i] == 2:
    ...         y[i] = 1
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X,
    ...     y,
    ...     test_size=0.2,
    ...     random_state=42,
    ... )
    >>> from eqc_models.ml.classifierqsvm import QSVMClassifier
    >>> obj = QSVMClassifier(
    ...     relaxation_schedule=2,
    ...     num_samples=1,
    ... )
    >>> from contextlib import redirect_stdout
    >>> import io
    >>> f = io.StringIO()
    >>> with redirect_stdout(f):
    ...     obj = obj.fit(X_train, y_train)
    ...     y_train_prd = obj.predict(X_train)
    ...     y_test_prd = obj.predict(X_test)

    """

    def __init__(
        self,
        relaxation_schedule=1,
        num_samples=1,
        lambda_coef=1.0,
    ):
        # NOTE: was `super(QSVMClassifier).__init__()` — an unbound super
        # object, so the parent initializer was never actually invoked.
        super().__init__()

        self.relaxation_schedule = relaxation_schedule
        self.num_samples = num_samples
        self.lambda_coef = lambda_coef
        # features are scaled into [-1, 1] before building the Hamiltonian
        self.fea_scaler = MinMaxScaler(feature_range=(-1, 1))

    def fit(self, X, y):
        """
        Build a QSVM classifier from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples.

        y : array-like of shape (n_samples,)
            The target values; must be in {-1, 1}.

        Returns
        -------
        Response of Dirac-3 in JSON format.
        """

        assert X.shape[0] == y.shape[0], "Inconsistent sizes!"

        assert set(y) == {-1, 1}, "Target values should be in {-1, 1}"

        X = self.fea_scaler.fit_transform(X)

        J, C, sum_constraint = self.get_hamiltonian(X, y)

        assert J.shape[0] == J.shape[1], "Inconsistent hamiltonian size!"
        assert J.shape[0] == C.shape[0], "Inconsistent hamiltonian size!"

        self.set_model(J, C, sum_constraint)

        sol, response = self.solve()

        assert len(sol) == C.shape[0], "Inconsistent solution size!"

        self.params = self.convert_sol_to_params(sol)

        # keep the (scaled) training data for reference
        self.X_train = X
        self.y_train = y

        return response

    def predict_raw(self, X: np.ndarray):
        """
        Compute raw (unsigned) decision values for X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)

        Returns
        -------
        y : ndarray of shape (n_samples,)
            The raw decision function values.
        """
        n_records = X.shape[0]
        X = self.fea_scaler.transform(X)
        # append a constant column so the last parameter acts as the bias
        X_tilde = np.concatenate((X, np.ones((n_records, 1))), axis=1)

        y = np.einsum("i,ki->k", self.params, X_tilde)

        return y

    def predict(self, X: np.ndarray):
        """
        Predict classes for X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)

        Returns
        -------
        y : ndarray of shape (n_samples,)
            The predicted classes (sign of the decision values).
        """

        y = self.predict_raw(X)
        y = np.sign(y)

        return y

    def get_hamiltonian(
        self,
        X: np.ndarray,
        y: np.ndarray,
    ):
        """Build the (J, C, sum_constraint) Hamiltonian for the QSVM objective."""
        n_records = X.shape[0]
        n_dims = X.shape[1]

        J = np.zeros(shape=(1 + n_dims, 1 + n_dims), dtype=np.float32)
        C = np.zeros(shape=(1 + n_dims,), dtype=np.float32)

        # augment features with a constant column for the bias term
        X_tilde = np.concatenate((X, np.ones((n_records, 1))), axis=1)

        J = self.lambda_coef * np.einsum(
            "i,ik,il->kl", y**2, X_tilde, X_tilde
        )

        # regularization on the weight entries only (not the bias term)
        for k in range(n_dims):
            J[k][k] += 0.5

        C = -2.0 * self.lambda_coef * np.einsum("i,ik->k", y, X_tilde)

        # symmetrize the quadratic term
        J = 0.5 * (J + J.transpose())
        C = C.reshape((1 + n_dims, 1))

        return J, C, 1.0

    def convert_sol_to_params(self, sol):
        """Convert a solver solution vector into the parameter array."""
        return np.array(sol)
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
class QSVMClassifierDual(ClassifierBase):
|
|
192
|
+
"""An implementation of dual QSVM classifier that uses QCi's Dirac-3.
|
|
193
|
+
|
|
18
194
|
Parameters
|
|
19
195
|
----------
|
|
20
|
-
|
|
196
|
+
|
|
21
197
|
relaxation_schedule: Relaxation schedule used by Dirac-3; default:
|
|
22
198
|
2.
|
|
23
|
-
|
|
199
|
+
|
|
24
200
|
num_samples: Number of samples used by Dirac-3; default: 1.
|
|
25
|
-
|
|
201
|
+
|
|
26
202
|
upper_limit: Coefficient upper limit; a regularization parameter;
|
|
27
203
|
default: 1.0.
|
|
28
|
-
|
|
204
|
+
|
|
29
205
|
gamma: Gaussian kernel parameter; default: 1.0.
|
|
30
|
-
|
|
206
|
+
|
|
31
207
|
eta: A penalty multiplier; default: 1.0.
|
|
32
|
-
|
|
208
|
+
|
|
33
209
|
zeta: A penalty multiplier; default: 1.0.
|
|
34
210
|
|
|
35
211
|
Examples
|
|
@@ -54,8 +230,8 @@ class QSVMClassifier(ClassifierBase):
|
|
|
54
230
|
... test_size=0.2,
|
|
55
231
|
... random_state=42,
|
|
56
232
|
... )
|
|
57
|
-
>>> from eqc_models.ml.classifierqsvm import
|
|
58
|
-
>>> obj =
|
|
233
|
+
>>> from eqc_models.ml.classifierqsvm import QSVMClassifierDual
|
|
234
|
+
>>> obj = QSVMClassifierDual(
|
|
59
235
|
... relaxation_schedule=2,
|
|
60
236
|
... num_samples=1,
|
|
61
237
|
... upper_limit=1.0,
|
|
@@ -70,9 +246,9 @@ class QSVMClassifier(ClassifierBase):
|
|
|
70
246
|
... obj = obj.fit(X_train, y_train)
|
|
71
247
|
... y_train_prd = obj.predict(X_train)
|
|
72
248
|
... y_test_prd = obj.predict(X_test)
|
|
73
|
-
|
|
249
|
+
|
|
74
250
|
"""
|
|
75
|
-
|
|
251
|
+
|
|
76
252
|
def __init__(
|
|
77
253
|
self,
|
|
78
254
|
relaxation_schedule=2,
|
|
@@ -81,8 +257,8 @@ class QSVMClassifier(ClassifierBase):
|
|
|
81
257
|
gamma=1.0,
|
|
82
258
|
eta=1.0,
|
|
83
259
|
zeta=1.0,
|
|
84
|
-
):
|
|
85
|
-
super(
|
|
260
|
+
):
|
|
261
|
+
super(QSVMClassifierDual).__init__()
|
|
86
262
|
|
|
87
263
|
self.relaxation_schedule = relaxation_schedule
|
|
88
264
|
self.num_samples = num_samples
|
|
@@ -97,20 +273,20 @@ class QSVMClassifier(ClassifierBase):
|
|
|
97
273
|
def fit(self, X, y):
|
|
98
274
|
"""
|
|
99
275
|
Build a QSVM classifier from the training set (X, y).
|
|
100
|
-
|
|
276
|
+
|
|
101
277
|
Parameters
|
|
102
278
|
----------
|
|
103
279
|
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
|
104
|
-
The training input samples.
|
|
105
|
-
|
|
280
|
+
The training input samples.
|
|
281
|
+
|
|
106
282
|
y : array-like of shape (n_samples,)
|
|
107
283
|
The target values.
|
|
108
|
-
|
|
284
|
+
|
|
109
285
|
Returns
|
|
110
286
|
-------
|
|
111
287
|
Response of Dirac-3 in JSON format.
|
|
112
288
|
"""
|
|
113
|
-
|
|
289
|
+
|
|
114
290
|
assert X.shape[0] == y.shape[0], "Inconsistent sizes!"
|
|
115
291
|
|
|
116
292
|
assert set(y) == {-1, 1}, "Target values should be in {-1, 1}"
|
|
@@ -144,17 +320,17 @@ class QSVMClassifier(ClassifierBase):
|
|
|
144
320
|
def predict(self, X: np.array):
|
|
145
321
|
"""
|
|
146
322
|
Predict classes for X.
|
|
147
|
-
|
|
323
|
+
|
|
148
324
|
Parameters
|
|
149
325
|
----------
|
|
150
326
|
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
|
151
|
-
|
|
327
|
+
|
|
152
328
|
Returns
|
|
153
329
|
-------
|
|
154
330
|
y : ndarray of shape (n_samples,)
|
|
155
331
|
The predicted classes.
|
|
156
332
|
"""
|
|
157
|
-
|
|
333
|
+
|
|
158
334
|
assert self.X_train is not None, "Model not trained yet!"
|
|
159
335
|
assert self.y_train is not None, "Model not trained yet!"
|
|
160
336
|
|