eqc-models 0.9.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eqc_models-0.9.8.data/platlib/compile_extensions.py +23 -0
- eqc_models-0.9.8.data/platlib/eqc_models/__init__.py +15 -0
- eqc_models-0.9.8.data/platlib/eqc_models/algorithms/__init__.py +4 -0
- eqc_models-0.9.8.data/platlib/eqc_models/algorithms/base.py +10 -0
- eqc_models-0.9.8.data/platlib/eqc_models/algorithms/penaltymultiplier.py +169 -0
- eqc_models-0.9.8.data/platlib/eqc_models/allocation/__init__.py +6 -0
- eqc_models-0.9.8.data/platlib/eqc_models/allocation/allocation.py +367 -0
- eqc_models-0.9.8.data/platlib/eqc_models/allocation/portbase.py +128 -0
- eqc_models-0.9.8.data/platlib/eqc_models/allocation/portmomentum.py +137 -0
- eqc_models-0.9.8.data/platlib/eqc_models/assignment/__init__.py +5 -0
- eqc_models-0.9.8.data/platlib/eqc_models/assignment/qap.py +82 -0
- eqc_models-0.9.8.data/platlib/eqc_models/assignment/setpartition.py +170 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/__init__.py +72 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/base.py +150 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/constraints.py +276 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/operators.py +201 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.c +11363 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.cpython-310-darwin.so +0 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/polyeval.pyx +72 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/polynomial.py +274 -0
- eqc_models-0.9.8.data/platlib/eqc_models/base/quadratic.py +250 -0
- eqc_models-0.9.8.data/platlib/eqc_models/decoding.py +20 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/__init__.py +5 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/base.py +63 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/hypergraph.py +307 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/maxcut.py +155 -0
- eqc_models-0.9.8.data/platlib/eqc_models/graph/maxkcut.py +184 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/__init__.py +15 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierbase.py +99 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierqboost.py +423 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/classifierqsvm.py +237 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/clustering.py +323 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/clusteringbase.py +112 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/decomposition.py +363 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/forecast.py +255 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/forecastbase.py +139 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/regressor.py +220 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/regressorbase.py +97 -0
- eqc_models-0.9.8.data/platlib/eqc_models/ml/reservoir.py +106 -0
- eqc_models-0.9.8.data/platlib/eqc_models/sequence/__init__.py +5 -0
- eqc_models-0.9.8.data/platlib/eqc_models/sequence/tsp.py +217 -0
- eqc_models-0.9.8.data/platlib/eqc_models/solvers/__init__.py +12 -0
- eqc_models-0.9.8.data/platlib/eqc_models/solvers/qciclient.py +707 -0
- eqc_models-0.9.8.data/platlib/eqc_models/utilities/__init__.py +6 -0
- eqc_models-0.9.8.data/platlib/eqc_models/utilities/fileio.py +38 -0
- eqc_models-0.9.8.data/platlib/eqc_models/utilities/polynomial.py +137 -0
- eqc_models-0.9.8.data/platlib/eqc_models/utilities/qplib.py +375 -0
- eqc_models-0.9.8.dist-info/LICENSE.txt +202 -0
- eqc_models-0.9.8.dist-info/METADATA +139 -0
- eqc_models-0.9.8.dist-info/RECORD +52 -0
- eqc_models-0.9.8.dist-info/WHEEL +5 -0
- eqc_models-0.9.8.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from setuptools import Extension
|
|
2
|
+
from setuptools.command.build_py import build_py as _build_py
|
|
3
|
+
import numpy
|
|
4
|
+
|
|
5
|
+
# Modules to be compiled and include_dirs when necessary
# NOTE: this list is appended to distribution.ext_modules by the custom
# build_py command below, so build_ext compiles polyeval before packaging.
extensions = [
    Extension(
        "eqc_models.base.polyeval",
        ["eqc_models/base/polyeval.pyx"], include_dirs=[numpy.get_include()],
    ),
]
|
|
12
|
+
|
|
13
|
+
class build_py(_build_py):
    """Custom ``build_py`` command that builds the C/Cython extensions first.

    ``run`` forces ``build_ext`` to execute before the pure-Python sources are
    copied, and ``initialize_options`` registers this module's ``extensions``
    list with the distribution so ``build_ext`` knows what to compile.
    """

    def run(self):
        # Compile extension modules before the normal build_py copy step.
        self.run_command("build_ext")
        return super().run()

    def initialize_options(self):
        super().initialize_options()
        # Use identity comparison with None ("== None" was the original smell).
        if self.distribution.ext_modules is None:
            self.distribution.ext_modules = []

        self.distribution.ext_modules.extend(extensions)
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# (C) Quantum Computing Inc., 2024.
|
|
2
|
+
"""
|
|
3
|
+
eqc-models package for high-level optimization modeling for EQC and other devices
|
|
4
|
+
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from .base import QuadraticModel, PolynomialModel
|
|
8
|
+
from .solvers import (Dirac1CloudSolver, Dirac3CloudSolver, Dirac3DirectSolver)
|
|
9
|
+
from .allocation import AllocationModel, AllocationModelX, ResourceRuleEnum
|
|
10
|
+
from .assignment import QAPModel
|
|
11
|
+
|
|
12
|
+
# Public names exported by `from eqc_models import *`; mirrors the imports above.
__all__ = ["QuadraticModel", "PolynomialModel", "Dirac1CloudSolver",
           "Dirac3CloudSolver", "AllocationModel", "AllocationModelX",
           "Dirac3DirectSolver", "ResourceRuleEnum",
           "QAPModel"]
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
# (C) Quantum Computing Inc., 2024.
|
|
2
|
+
from typing import Union
|
|
3
|
+
import logging
|
|
4
|
+
import numpy as np
|
|
5
|
+
from eqc_models.base.constraints import ConstraintModel
|
|
6
|
+
from eqc_models.base.base import ModelSolver
|
|
7
|
+
from eqc_models.algorithms.base import Algorithm
|
|
8
|
+
|
|
9
|
+
log = logging.getLogger(name=__name__)
|
|
10
|
+
|
|
11
|
+
class PenaltyMultiplierAlgorithm(Algorithm):
    """
    Parameters
    ----------

    model : ConstraintModel
        Instance of a model to search out a penalty multiplier value, must be constrained model.
    solver : ModelSolver subclass
        Instance of a solver class to use to run the algorithm.


    Properties
    ----------

    upper_bound : float
        Upper bound value for the objective function, this need not be a least upper bound,
        but the tighter the value, the more efficient the search

    solutions : List
        The solutions found during the algorithm run
    alphas : List
        The values of multiplier found at each algorithm iteration
    penalties : List
        The values for penalties found at each algorithm iteration. A penalty of 0
        indicates algorithm termination.
    dynamic_range : List
        The values for the dynamic range of the unconstrained problem formulation,
        which is useful for identifying difficulty in representation of the problem
        on the analog device.

    The penalty multiplier search algorithm uses an infeasible solution to select the next
    value for the penalty multiplier. The algorithm depends upon good solutions and only
    guarantees termination when the solution found for a given multiplier is optimal. For
    this reason, the implementation will terminate when no progress is made, thus making
    it a heuristic. Providing an exact solver for the solver instance will guarantee that
    the algorithm is correct and the penalty multiplier found is the minimal multiplier
    capable of enforcing the condition that an unconstrained objective value for a feasible
    solution is less than an unconstrained objective value for an infeasible solution.

    This example uses the quadratic assignment problem and the known multiplier to test
    the implementation of the algorithm.

    >>> from eqc_models.solvers.qciclient import Dirac3CloudSolver
    >>> from eqc_models.assignment.qap import QAPModel
    >>> A = np.array([[0, 5, 8, 0, 1],
    ...               [0, 0, 0, 10, 15],
    ...               [0, 0, 0, 13, 18],
    ...               [0, 0, 0, 0, 0.],
    ...               [0, 0, 0, 1, 0.]])
    >>> B = np.array([[0, 8.54, 6.4, 10, 8.94],
    ...               [8.54, 0, 4.47, 5.39, 6.49],
    ...               [6.4, 4.47, 0, 3.61, 3.0],
    ...               [10, 5.39, 3.61, 0, 2.0],
    ...               [8.94, 6.49, 3.0, 2.0, 0.]])
    >>> C = np.array([[2, 3, 6, 3, 7],
    ...               [3, 9, 2, 5, 9],
    ...               [2, 6, 4, 1, 2],
    ...               [7, 5, 8, 5, 7],
    ...               [1, 9, 2, 9, 2.]])
    >>> model = QAPModel(A, B, C)
    >>> solver = Dirac3CloudSolver() # must be configured with environment variables
    >>> algo = PenaltyMultiplierAlgorithm(model, solver)
    >>> algo.upper_bound = 330.64
    >>> algo.run(relaxation_schedule=2, mean_photon_number=0.15, normalized_loss_rate=4, num_samples=5) # doctest: +ELLIPSIS
    2... RUNNING... COMPLETED...
    >>> algo.alphas[-1] # doctest: +SKIP
    106.25
    >>> algo.penalties[-1] # doctest: +SKIP
    0.0

    """

    def __init__(self, model : ConstraintModel, solver : ModelSolver):
        self.model = model
        self.solver = solver
        # the upper bound must be supplied via the upper_bound property
        # before run() is called
        self.ub = None
        self.solutions = None
        self.penalties = None
        self.alphas = None
        self.dynamic_range = None
        self.responses = None
        # device-reported energies, one per iteration; was previously
        # created only inside run(), leaving the attribute undefined before
        self.energies = None

    @property
    def upper_bound(self) -> float:
        return self.ub

    @upper_bound.setter
    def upper_bound(self, value : float):
        self.ub = value

    def run(self, initial_alpha : float=None, initial_solution : np.ndarray = None, **kwargs):
        """ Start with a guess at alpha, iterate until alpha is sufficiently large """

        self.solutions = solutions = []
        self.penalties = penalties = []
        self.alphas = alphas = []
        self.dynamic_range = dynamic_range = []
        self.responses = responses = []
        self.energies = energies = []

        model = self.model
        solver = self.solver
        offset = model.offset
        ub = self.ub
        if ub is None:
            # every iteration compares objective values against ub, so fail
            # fast with a clear message instead of a TypeError mid-run
            raise ValueError("upper_bound must be set before calling run()")
        if initial_alpha is None and offset > 0:
            alpha = ub / offset
            log.info("UPPER BOUND %f OFFSET %f -> ALPHA %f",
                     ub, offset, alpha)
            if alpha < 1:
                alpha = 1
        elif initial_alpha is not None:
            alpha = initial_alpha
        else:
            log.info("No tricks for initial alpha, setting to 1")
            alpha = 1

        if initial_solution is not None:
            log.debug("INITIAL SOLUTION GIVEN")
            obj_val = model.evaluate(initial_solution, alpha, True)
            penalty = model.evaluatePenalties(initial_solution) + offset
            log.info("INITIAL SOLUTION OBJECTIVE %f PENALTY %f", obj_val, penalty)
            if obj_val < ub and penalty > 1e-6:
                # guard against dividing by a (near-)zero penalty: a feasible
                # initial solution carries no information about alpha
                alpha += (ub - obj_val) / penalty
                log.info("INITIAL SOLUTION DETERMINED ALPHA %f", alpha)
        else:
            penalty = None

        while penalty is None or penalty > 1e-6:
            log.info("NEW RUN")
            log.info("SETTING MULTIPLIER %f", alpha)
            model.penalty_multiplier = float(alpha)
            log.info("GOT MULTIPLIER %f NEW OFFSET %f", model.penalty_multiplier,
                     model.penalty_multiplier * model.offset)
            dynamic_range.append(float(model.dynamic_range))
            log.info("CALLING SOLVE WITH ALPHA %f DYNAMIC RANGE %f", alpha, dynamic_range[-1])
            alphas.append(float(alpha))
            response = solver.solve(model, **kwargs)
            responses.append(response)
            results = response["results"]
            solution = np.array(results["solutions"][0])
            solutions.append(solution)
            penalty = model.evaluatePenalties(solution) + offset
            penalties.append(float(penalty))
            obj_val = model.evaluate(solution, alpha, True)
            less_offset = model.evaluate(solution, alpha, False)
            energies.append(results["energies"][0])
            log.info("NEW SOLUTION OBJECTIVE %f LESS OFFSET %f ENERGY %f PENALTY %f",
                     obj_val, less_offset, energies[-1], penalty)
            if obj_val < ub and penalty > 1e-6:
                # same near-zero guard as above: only push alpha upward while
                # the latest solution is still infeasible (avoids a
                # ZeroDivisionError on an exactly-feasible solution)
                alpha += (ub - obj_val) / penalty
            # stall test: the mean of the previous two penalties matching the
            # current one means no progress; require at least three recorded
            # iterations before quitting, as the warning message states
            if len(penalties) > 2 and abs(sum(penalties[-2:]) / 2 - penalty) < 1e-4:
                # log.warn is deprecated; use log.warning
                log.warning("SUFFICIENT PROGRESS NOT MADE FOR THREE ITERATIONS, QUITTING")
                break
|
|
@@ -0,0 +1,367 @@
|
|
|
1
|
+
# (C) Quantum Computing Inc., 2024.
|
|
2
|
+
from typing import List, Tuple
|
|
3
|
+
from enum import Enum
|
|
4
|
+
import numpy as np
|
|
5
|
+
from eqc_models.base.constraints import InequalitiesMixin
|
|
6
|
+
from eqc_models.base.quadratic import ConstrainedQuadraticModel
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class AllocationModel(ConstrainedQuadraticModel):
    """
    Parameters
    ----------

    resources: List
        names of available resources.
    tasks: List
        names of tasks.
    resource_usage: List of Lists or 2D np.ndarray
        rows represent tasks and columns represent resources,
        specifying the amount of each resource required per task.
    resource_limits: 1D array or List
        specifying the limit on each resource.
    cost_per_task: 1D array or List
        specifying the cost per task (or benefit with negative of value).


    Attributes
    ----------

    penalty_multiplier: float
        value for weighting the penalties formed from the equality constraints

    qubo: eqc_models.base.operators.QUBO
        QUBO operator representation

    polynomial: eqc_models.base.operators.Polynomial
        Polynomial operator representation


    This class represents a resource allocation model for maximizing total benefit. In other words,
    Given a list of resources and a list of tasks, allocate the resources among the tasks so as to
    maximize the economic benefit.

    Here's an example. Five tasks must share 4 resources. Each task can use a different amount of
    each resource.

    +--------------------+------+------+---------+---------+---------+
    |                    | Spam | Eggs | Coconut | Sparrow | Benefit |
    +--------------------+------+------+---------+---------+---------+
    | Breakfast          | 1    | 2    | 0       | 0       | 3       |
    +--------------------+------+------+---------+---------+---------+
    | Countryside Stroll | 0    | 0    | 1       | 0       | 1       |
    +--------------------+------+------+---------+---------+---------+
    | Storm Castle       | 0    | 12   | 1       | 1       | 10      |
    +--------------------+------+------+---------+---------+---------+
    | Availability       | 1    | 12   | 2       | 1       |         |
    +--------------------+------+------+---------+---------+---------+

    >>> resources = ["Spam", "Eggs", "Coconut", "Sparrow"]
    >>> tasks = ["Breakfast", "Countryside Stroll", "Storm Castle"]
    >>> resource_usage = [[1, 2, 0, 0], [0, 0, 1, 0], [0, 12, 1, 1]]
    >>> resource_limits = [1, 12, 2, 1]
    >>> cost_per_task = [-3, -1, -10.]
    >>> allocation_model = AllocationModel(resources, tasks, resource_usage, resource_limits, cost_per_task)
    >>> allocation_model.penalty_multiplier = 1
    >>> C, J = allocation_model.H
    >>> C # -3 -2 * (12 * 2 + 1 * 1), -1 -2 * 2*1, -10 -2 * (12 * 12 + 1 * 2 + 1 * 1)
    ... # doctest: +NORMALIZE_WHITESPACE
    array([ -53., -5., -304.])
    >>> J # doctest: +NORMALIZE_WHITESPACE
    array([[ 5., 0., 24.],
           [ 0., 1., 1.],
           [ 24., 1., 146.]])

    """

    def __init__(self, resources: List, tasks: List, resource_usage: List, resource_limits: List,
                 cost_per_task: List):
        if not isinstance(resources, list):
            raise TypeError("Argument 'resources' must be a list")
        if not isinstance(tasks, list):
            raise TypeError("Argument 'tasks' must be a list")
        # BUGFIX: the docstring advertises ndarray inputs for the numeric
        # arguments, but the checks previously rejected anything but list;
        # accept both and convert with np.array below.
        if not isinstance(resource_usage, (list, np.ndarray)):
            raise TypeError("Argument 'resource_usage' must be a list or ndarray")
        if not isinstance(resource_limits, (list, np.ndarray)):
            raise TypeError("Argument 'resource_limits' must be a list or ndarray")
        if not isinstance(cost_per_task, (list, np.ndarray)):
            raise TypeError("Argument 'cost_per_task' must be a list or ndarray")
        self.resources = resources
        self.tasks = tasks
        self.resource_usage = np.array(resource_usage)
        self.resource_limits = np.array(resource_limits)
        self.cost_per_task = np.array(cost_per_task)
        # zero quadratic objective: the allocation objective is purely linear;
        # constraints are resource_usage.T @ x (compare) resource_limits
        super().__init__(self.cost_per_task, np.zeros((len(self.tasks), len(self.tasks))),
                         self.resource_usage.T, self.resource_limits)
        self._validate_dimensions()

    @property
    def upper_bound(self) -> np.ndarray:
        # Per-task variable bound: largest nonzero limit divided by the
        # smallest nonzero usage, floored, repeated for every task.
        # NOTE(review): this recomputes from the data on every access and
        # ignores any value stored by the setter below — confirm intended.
        return np.array([np.floor(max(self.resource_limits[self.resource_limits != 0]) /
                                  min(self.resource_usage[self.resource_usage != 0]))] * len(self.tasks), dtype=int)

    @upper_bound.setter
    def upper_bound(self, value: List):
        self._upper_bound = value

    @property
    def variables(self):
        # one (task, resource) pair per variable name
        return [(i, j) for i in self.tasks for j in self.resources]

    def _validate_dimensions(self):
        """Raises ValueErrors for inconsistent dimensions."""

        num_tasks = len(self.tasks)
        num_resources = len(self.resources)
        if self.resource_usage.shape != (num_tasks, num_resources):
            raise ValueError("resource_usage matrix dimensions don't match number of tasks and resources")

        if self.resource_limits.shape[0] != num_resources:
            raise ValueError("resource_limits length doesn't match number of resources")

        if self.cost_per_task.shape[0] != num_tasks:
            raise ValueError("cost_per_task length doesn't match number of tasks")

    def add_task(self, task: str, task_resource_usage: List, task_cost: float):
        """
        task: str
            Name of task to add
        task_resource_usage: List
            Quantity of each resource used by the new task
        task_cost: float
            Quantity indicating the cost or benefit (negative) of the task

        Add a task to the problem, modifying the resource usage and task cost arrays.

        """
        self.tasks += [task]
        self.resource_usage = np.vstack([self.resource_usage, task_resource_usage])
        self.cost_per_task = np.append(self.cost_per_task, task_cost)

    @property
    def H(self) -> Tuple[np.ndarray, np.ndarray]:
        """ Return linear, quadratic portions of the (quadratic) Hamiltonian """
        Pl, Pq = self.penalties
        alpha = self.penalty_multiplier
        obj_linear, obj_quad = self.linear_objective, self.quad_objective
        # compute each penalized sum once; previously both sums were
        # evaluated a second time in the return statement
        self._C = obj_linear + alpha * Pl
        self._J = obj_quad + alpha * Pq

        return self._C, self._J
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
class ResourceRuleEnum(Enum):
    """
    Enumeration of the allowed resource rules, mapping to the mathematical expression:

    MAXIMUM -> LE (less than or equal to)
    MINIMUM -> GE (greater than or equal to)
    EXACT -> EQ (equal to)

    """

    MAXIMUM = "LE"
    MINIMUM = "GE"
    EXACT = "EQ"
|
|
176
|
+
|
|
177
|
+
class AllocationModelX(InequalitiesMixin, AllocationModel):
    """
    Parameters
    ----------

    resources: List
        names of available resources.
    tasks: List
        names of tasks.
    resource_usage: List of Lists or 2D np.ndarray
        rows represent tasks and columns represent resources,
        specifying the amount of each resource required per task.
    resource_limits: 1D array or List
        specifying the limit on each resource.
    resource_rule: List
        ResourceRuleEnum values for each resource
    cost_per_task: 1D array or List
        specifying the cost per task (or benefit with negative of value).


    Attributes
    ----------

    penalty_multiplier: float
        value for weighting the penalties formed from the equality constraints

    qubo: eqc_models.base.operators.QUBO
        QUBO operator representation

    polynomial: eqc_models.base.operators.Polynomial
        Polynomial operator representation

    variables: List
        names of variables formed from tasks and assignments


    This class represents a resource allocation model for maximizing total benefit. In other words,
    Given a list of resources and a list of tasks, allocate the resources among the tasks so as to
    maximize the economic benefit.

    Adds resource_rule as an argument. This must be a list of strings specifying
    constraints for each resource (LE, GE, or EQ).

    Here's an example. Five tasks must share 4 resources. Each task can use a different amount of
    each resource.

    +--------------------+------+------+---------+---------+---------+
    |                    | Spam | Eggs | Coconut | Sparrow | Benefit |
    +--------------------+------+------+---------+---------+---------+
    | Breakfast          | 1    | 2    | 0       | 0       | 3       |
    +--------------------+------+------+---------+---------+---------+
    | Countryside Stroll | 0    | 0    | 1       | 0       | 1       |
    +--------------------+------+------+---------+---------+---------+
    | Storm Castle       | 0    | 12   | 1       | 1       | 10      |
    +--------------------+------+------+---------+---------+---------+
    | Availability       | 1    | 12   | 2       | 1       |         |
    +--------------------+------+------+---------+---------+---------+

    >>> resources = ["Spam", "Eggs", "Coconut", "Sparrow"]
    >>> tasks = ["Breakfast", "Countryside Stroll", "Storm Castle"]
    >>> resource_usage = [[1, 2, 0, 0], [0, 0, 1, 0], [0, 12, 1, 1]]
    >>> resource_limits = [1, 12, 2, 1]
    >>> cost_per_task = [-3, -1, -10.]
    >>> resource_rules = [ResourceRuleEnum.MAXIMUM for i in range(len(resources))]
    >>> allocation_model = AllocationModelX(resources, tasks, resource_usage, resource_limits, resource_rules, cost_per_task)
    >>> allocation_model.penalty_multiplier = 1
    >>> C, J = allocation_model.H
    >>> C # -3 -2 * (12 * 2 + 1 * 1), -1 -2 * 2*1, -10 -2 * (12 * 12 + 1 * 2 + 1 * 1)
    ... # doctest: +NORMALIZE_WHITESPACE
    array([ -53., -5., -304., -2., -24., -4., -2.])
    >>> J # doctest: +NORMALIZE_WHITESPACE
    array([[ 5., 0., 24., 1., 2., 0., 0.],
           [ 0., 1., 1., 0., 0., 1., 0.],
           [ 24., 1., 146., 0., 12., 1., 1.],
           [ 1., 0., 0., 1., 0., 0., 0.],
           [ 2., 0., 12., 0., 1., 0., 0.],
           [ 0., 1., 1., 0., 0., 1., 0.],
           [ 0., 0., 1., 0., 0., 0., 1.]])
    """
    def __init__(self, resources: List, tasks: List, resource_usage: List, resource_limits: List,
                 resource_rule: List[ResourceRuleEnum], cost_per_task: List):
        super().__init__(resources, tasks, resource_usage, resource_limits, cost_per_task)

        if not isinstance(resource_rule, list):
            raise TypeError("Argument 'resource_rule' must be a list")
        elif len(resource_rule) != len(resources):
            raise ValueError("Argument 'resource_rule' must be the same length as 'resources'")

        try:
            check_rule = set([rule.value for rule in resource_rule])
            if not check_rule.issubset({"LE", "GE", "EQ"}):
                raise ValueError("Argument 'resource_rule' must contain only enums 'ResourceRuleEnum.MAXIMUM', "
                                 "'ResourceRuleEnum.MINIMUM' or 'ResourceRuleEnum.EXACT'.")
        except AttributeError as e:
            # Handle the case where elements in resource_rule don't have a 'value' attribute (likely not enums)
            raise TypeError("Argument 'resource_rule' must contain only enums. Elements lack a 'value' "
                            "attribute.") from e

        # store the string senses ("LE"/"GE"/"EQ"), not the enum members
        self.senses = list([rule.value for rule in resource_rule])

    @property
    def upper_bound(self) -> np.ndarray:
        # same heuristic bound as AllocationModel, sized to include slacks
        return np.array([np.floor(max(self.resource_limits[self.resource_limits != 0]) /
                                  min(self.resource_usage[self.resource_usage != 0]))] * self.n, dtype=int)

    @upper_bound.setter
    def upper_bound(self, value: List):
        self._upper_bound = value

    @property
    def linear_objective(self) -> np.ndarray:
        """
        Returns a 1D numpy array representing the linear part of the objective function (total profit).
        """
        # slack variables carry no cost
        return np.hstack([self.cost_per_task, np.zeros(self.num_slacks)])

    @linear_objective.setter
    def linear_objective(self, value : np.ndarray):
        # raise instead of assert so the check survives `python -O`
        if not (value[len(self.cost_per_task):] == 0).all():
            raise ValueError("additional values beyond cost length must be 0")

        self.cost_per_task[:] = value[:len(self.cost_per_task)]

    @property
    def quad_objective(self) -> np.ndarray:
        """
        Returns a 2D numpy array representing the quadratic part of the objective function (always zero in this case).
        """
        # No quadratic term
        n = self.n
        return np.zeros((n, n))

    @quad_objective.setter
    def quad_objective(self, value: np.ndarray):
        """ Don't let anything be passed in except arrays of all 0 """

        # the setting of this gets ignored, but reject any nonzero value
        # explicitly (raise instead of assert so the check survives `python -O`)
        if not (value == 0).all():
            raise ValueError("quadratic terms in objective must be 0")

    @property
    def H(self):
        """
        Overrides the parent build method to incorporate slack variables based on resource_rule.
        """
        # Build constraint penalties with slack variables
        Pl, Pq = self.penalties
        alpha = self.penalty_multiplier
        obj_linear, obj_quad = self.linear_objective, self.quad_objective

        # compute each penalized sum once and cache it
        self._C = obj_linear + alpha * Pl
        self._J = obj_quad + alpha * Pq

        return self._C, self._J

    def checkResources(self, solution):
        """
        Parameters
        ----------
        solution: List or np.ndarray
            solution vector to check for resource violations

        Returns
        -------
        List of violations: (name, rule, violation quantity, message)

        """
        violations = []
        solution = np.array(solution)
        # loop-invariant: number of decision (non-slack) variables
        decision_vars = self.n - self.num_slacks
        for name, usage, rule, limit in zip(self.resources, self.resource_usage.T,
                                            self.senses, self.resource_limits):
            value = np.squeeze(usage@solution[:decision_vars])
            # BUGFIX: self.senses holds the string values ("LE"/"GE"/"EQ"),
            # not ResourceRuleEnum members, so compare against .value;
            # previously these comparisons were always False and MINIMUM and
            # EXACT rules were silently checked with the MAXIMUM logic.
            if rule == ResourceRuleEnum.MINIMUM.value:
                mult = -1
            else:
                mult = 1
            if rule == ResourceRuleEnum.EXACT.value:
                if value - limit != 0:
                    msg = f"{value} of {name} violates {rule} {limit}"
                    violations.append((name, rule, value - limit, msg))
            else:
                if mult * (value - limit) > 0:
                    msg = f"{value} of {name} violates {rule} {limit}"
                    violations.append((name, rule, (value - limit), msg))
        return violations

    @property
    def n(self):
        # total variable count: one per task plus one slack per inequality
        return len(self.tasks) + self.num_slacks