desdeo 1.2-py3-none-any.whl → 2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- desdeo/__init__.py +8 -8
- desdeo/api/README.md +73 -0
- desdeo/api/__init__.py +15 -0
- desdeo/api/app.py +40 -0
- desdeo/api/config.py +69 -0
- desdeo/api/config.toml +53 -0
- desdeo/api/db.py +25 -0
- desdeo/api/db_init.py +79 -0
- desdeo/api/db_models.py +164 -0
- desdeo/api/malaga_db_init.py +27 -0
- desdeo/api/models/__init__.py +66 -0
- desdeo/api/models/archive.py +34 -0
- desdeo/api/models/preference.py +90 -0
- desdeo/api/models/problem.py +507 -0
- desdeo/api/models/reference_point_method.py +18 -0
- desdeo/api/models/session.py +46 -0
- desdeo/api/models/state.py +96 -0
- desdeo/api/models/user.py +51 -0
- desdeo/api/routers/_NAUTILUS.py +245 -0
- desdeo/api/routers/_NAUTILUS_navigator.py +233 -0
- desdeo/api/routers/_NIMBUS.py +762 -0
- desdeo/api/routers/__init__.py +5 -0
- desdeo/api/routers/problem.py +110 -0
- desdeo/api/routers/reference_point_method.py +117 -0
- desdeo/api/routers/session.py +76 -0
- desdeo/api/routers/test.py +16 -0
- desdeo/api/routers/user_authentication.py +366 -0
- desdeo/api/schema.py +94 -0
- desdeo/api/tests/__init__.py +0 -0
- desdeo/api/tests/conftest.py +59 -0
- desdeo/api/tests/test_models.py +701 -0
- desdeo/api/tests/test_routes.py +216 -0
- desdeo/api/utils/database.py +274 -0
- desdeo/api/utils/logger.py +29 -0
- desdeo/core.py +27 -0
- desdeo/emo/__init__.py +29 -0
- desdeo/emo/hooks/archivers.py +172 -0
- desdeo/emo/methods/EAs.py +418 -0
- desdeo/emo/methods/__init__.py +0 -0
- desdeo/emo/methods/bases.py +59 -0
- desdeo/emo/operators/__init__.py +1 -0
- desdeo/emo/operators/crossover.py +780 -0
- desdeo/emo/operators/evaluator.py +118 -0
- desdeo/emo/operators/generator.py +356 -0
- desdeo/emo/operators/mutation.py +1053 -0
- desdeo/emo/operators/selection.py +1036 -0
- desdeo/emo/operators/termination.py +178 -0
- desdeo/explanations/__init__.py +6 -0
- desdeo/explanations/explainer.py +100 -0
- desdeo/explanations/utils.py +90 -0
- desdeo/mcdm/__init__.py +19 -0
- desdeo/mcdm/nautili.py +345 -0
- desdeo/mcdm/nautilus.py +477 -0
- desdeo/mcdm/nautilus_navigator.py +655 -0
- desdeo/mcdm/nimbus.py +417 -0
- desdeo/mcdm/pareto_navigator.py +269 -0
- desdeo/mcdm/reference_point_method.py +116 -0
- desdeo/problem/__init__.py +79 -0
- desdeo/problem/evaluator.py +561 -0
- desdeo/problem/gurobipy_evaluator.py +562 -0
- desdeo/problem/infix_parser.py +341 -0
- desdeo/problem/json_parser.py +944 -0
- desdeo/problem/pyomo_evaluator.py +468 -0
- desdeo/problem/schema.py +1808 -0
- desdeo/problem/simulator_evaluator.py +298 -0
- desdeo/problem/sympy_evaluator.py +244 -0
- desdeo/problem/testproblems/__init__.py +73 -0
- desdeo/problem/testproblems/binh_and_korn_problem.py +88 -0
- desdeo/problem/testproblems/dtlz2_problem.py +102 -0
- desdeo/problem/testproblems/forest_problem.py +275 -0
- desdeo/problem/testproblems/knapsack_problem.py +163 -0
- desdeo/problem/testproblems/mcwb_problem.py +831 -0
- desdeo/problem/testproblems/mixed_variable_dimenrions_problem.py +83 -0
- desdeo/problem/testproblems/momip_problem.py +172 -0
- desdeo/problem/testproblems/nimbus_problem.py +143 -0
- desdeo/problem/testproblems/pareto_navigator_problem.py +89 -0
- desdeo/problem/testproblems/re_problem.py +492 -0
- desdeo/problem/testproblems/river_pollution_problem.py +434 -0
- desdeo/problem/testproblems/rocket_injector_design_problem.py +140 -0
- desdeo/problem/testproblems/simple_problem.py +351 -0
- desdeo/problem/testproblems/simulator_problem.py +92 -0
- desdeo/problem/testproblems/spanish_sustainability_problem.py +945 -0
- desdeo/problem/testproblems/zdt_problem.py +271 -0
- desdeo/problem/utils.py +245 -0
- desdeo/tools/GenerateReferencePoints.py +181 -0
- desdeo/tools/__init__.py +102 -0
- desdeo/tools/generics.py +145 -0
- desdeo/tools/gurobipy_solver_interfaces.py +258 -0
- desdeo/tools/indicators_binary.py +11 -0
- desdeo/tools/indicators_unary.py +375 -0
- desdeo/tools/interaction_schema.py +38 -0
- desdeo/tools/intersection.py +54 -0
- desdeo/tools/iterative_pareto_representer.py +99 -0
- desdeo/tools/message.py +234 -0
- desdeo/tools/ng_solver_interfaces.py +199 -0
- desdeo/tools/non_dominated_sorting.py +133 -0
- desdeo/tools/patterns.py +281 -0
- desdeo/tools/proximal_solver.py +99 -0
- desdeo/tools/pyomo_solver_interfaces.py +464 -0
- desdeo/tools/reference_vectors.py +462 -0
- desdeo/tools/scalarization.py +3138 -0
- desdeo/tools/scipy_solver_interfaces.py +454 -0
- desdeo/tools/score_bands.py +464 -0
- desdeo/tools/utils.py +320 -0
- desdeo/utopia_stuff/__init__.py +0 -0
- desdeo/utopia_stuff/data/1.json +15 -0
- desdeo/utopia_stuff/data/2.json +13 -0
- desdeo/utopia_stuff/data/3.json +15 -0
- desdeo/utopia_stuff/data/4.json +17 -0
- desdeo/utopia_stuff/data/5.json +15 -0
- desdeo/utopia_stuff/from_json.py +40 -0
- desdeo/utopia_stuff/reinit_user.py +38 -0
- desdeo/utopia_stuff/utopia_db_init.py +212 -0
- desdeo/utopia_stuff/utopia_problem.py +403 -0
- desdeo/utopia_stuff/utopia_problem_old.py +415 -0
- desdeo/utopia_stuff/utopia_reference_solutions.py +79 -0
- desdeo-2.0.0.dist-info/LICENSE +21 -0
- desdeo-2.0.0.dist-info/METADATA +168 -0
- desdeo-2.0.0.dist-info/RECORD +120 -0
- {desdeo-1.2.dist-info → desdeo-2.0.0.dist-info}/WHEEL +1 -1
- desdeo-1.2.dist-info/METADATA +0 -16
- desdeo-1.2.dist-info/RECORD +0 -4
desdeo/tools/message.py
ADDED
@@ -0,0 +1,234 @@

```python
"""Defines the messaging protocol used by the various EMO operators."""

from enum import Enum
from typing import Any, Literal

from polars import DataFrame
from pydantic import BaseModel, ConfigDict, Field, field_serializer


class CrossoverMessageTopics(Enum):
    """Topics for messages related to crossover operators."""

    TEST = "TEST"
    """ A message topic used only for testing the crossover operators. """
    XOVER_PROBABILITY = "XOVER_PROBABILITY"
    """ The current crossover probability. """
    XOVER_DISTRIBUTION = "XOVER_DISTRIBUTION"
    """ The current crossover distribution index. Primarily used in the SBX crossover. """
    PARENTS = "PARENTS"
    """ The parents selected for crossover. """
    OFFSPRINGS = "OFFSPRINGS"
    """ The offsprings generated from the crossover. """
    ALPHA = "ALPHA"
    """ Alpha parameter used in crossover. """


class MutationMessageTopics(Enum):
    """Topics for messages related to mutation operators."""

    TEST = "TEST"
    """ A message topic used only for testing the mutation operators. """
    MUTATION_PROBABILITY = "MUTATION_PROBABILITY"
    """ The current mutation probability. """
    MUTATION_DISTRIBUTION = "MUTATION_DISTRIBUTION"
    """ The current mutation distribution index. Primarily used in the polynomial mutation. """
    OFFSPRING_ORIGINAL = "OFFSPRING_ORIGINAL"
    """ The original offsprings before mutation. """
    OFFSPRINGS = "OFFSPRINGS"
    """ The offsprings after mutation. """
    PARENTS = "PARENTS"
    """ The parents of the offsprings. """


class EvaluatorMessageTopics(Enum):
    """Topics for messages related to evaluator operators."""

    TEST = "TEST"
    """ A message topic used only for testing the evaluator operators. """
    POPULATION = "POPULATION"
    """ The population to evaluate. """
    OUTPUTS = "OUTPUTS"
    """ The outputs of the population. Contains objectives, targets, constraints. """
    OBJECTIVES = "OBJECTIVES"
    """ The true objective values of the population. """
    TARGETS = "TARGETS"
    """ The targets, i.e., objective values seen by the evolutionary operators. """
    CONSTRAINTS = "CONSTRAINTS"
    """ The constraints of the population. """
    VERBOSE_OUTPUTS = "VERBOSE_OUTPUTS"
    """ Same as POPULATION + OUTPUTS. """
    NEW_EVALUATIONS = "NEW_EVALUATIONS"
    """ The number of new evaluations. """


class GeneratorMessageTopics(Enum):
    """Topics for messages related to population generator operators."""

    TEST = "TEST"
    """ A message topic used only for testing the generator operators. """
    POPULATION = "POPULATION"
    """ The population to evaluate. """
    OUTPUTS = "OUTPUTS"
    """ The outputs of the population generation. Contains objectives, targets, and constraints. """
    OBJECTIVES = "OBJECTIVES"
    """ The true objective values of the population. """
    TARGETS = "TARGETS"
    """ The targets, i.e., objective values seen by the evolutionary operators. """
    CONSTRAINTS = "CONSTRAINTS"
    """ The constraints of the population. """
    VERBOSE_OUTPUTS = "VERBOSE_OUTPUTS"
    """ Same as POPULATION + OUTPUTS. """
    NEW_EVALUATIONS = "NEW_EVALUATIONS"
    """ The number of new evaluations. """


class SelectorMessageTopics(Enum):
    """Topics for messages related to selector operators."""

    TEST = "TEST"
    """ A message topic used only for testing the selector operators. """
    STATE = "STATE"
    """ The state of the parameters of the selector. """
    INDIVIDUALS = "INDIVIDUALS"
    """ The individuals to select from. """
    OUTPUTS = "OUTPUTS"
    """ The outputs of the individuals. """
    CONSTRAINTS = "CONSTRAINTS"
    """ The constraints of the individuals. """
    SELECTED_INDIVIDUALS = "SELECTED_INDIVIDUALS"
    """ The individuals selected by the selector. """
    SELECTED_OUTPUTS = "SELECTED_OUTPUTS"
    """ The targets of the selected individuals. """
    SELECTED_VERBOSE_OUTPUTS = "SELECTED_VERBOSE_OUTPUTS"
    """ Same as SELECTED_OUTPUTS + SELECTED_INDIVIDUALS. """
    REFERENCE_VECTORS = "REFERENCE_VECTORS"
    """ The reference vectors used in the selection in decomposition-based EMO algorithms. """


class TerminatorMessageTopics(Enum):
    """Topics for messages related to terminator operators."""

    TEST = "TEST"
    """ A message topic used only for testing the terminator operators. """
    STATE = "STATE"
    """ The state of the parameters of the terminator. """
    TERMINATION = "TERMINATION"
    """ The value of the termination condition. """
    GENERATION = "GENERATION"
    """ The current generation number. """
    EVALUATION = "EVALUATION"
    """ The current number of evaluations. """
    MAX_GENERATIONS = "MAX_GENERATIONS"
    """ The maximum number of generations. """
    MAX_EVALUATIONS = "MAX_EVALUATIONS"
    """ The maximum number of evaluations. """


MessageTopics = (
    CrossoverMessageTopics
    | MutationMessageTopics
    | EvaluatorMessageTopics
    | GeneratorMessageTopics
    | SelectorMessageTopics
    | TerminatorMessageTopics
    | Literal["ALL"]  # Used to indicate that all topics are of interest to a subscriber.
)


class BaseMessage(BaseModel):
    """The base class for messages. Carries the topic and the source of the message."""

    topic: MessageTopics = Field(..., description="The topic of the message.")
    """ The topic of the message. """
    source: str = Field(..., description="The source of the message.")
    """ The source of the message. """


class IntMessage(BaseMessage):
    """A message containing an integer value."""

    value: int = Field(..., description="The integer value of the message.")
    """ The integer value of the message. """


class FloatMessage(BaseMessage):
    """A message containing a float value."""

    value: float = Field(..., description="The float value of the message.")
    """ The float value of the message. """


class StringMessage(BaseMessage):
    """A message containing a string value."""

    value: str = Field(..., description="The string value of the message.")
    """ The string value of the message. """


class BoolMessage(BaseMessage):
    """A message containing a boolean value."""

    value: bool = Field(..., description="The boolean value of the message.")
    """ The boolean value of the message. """


class DictMessage(BaseMessage):
    """A message containing a dictionary value."""

    value: dict[str, Any] = Field(..., description="The dictionary value of the message.")
    """ The dictionary value of the message. """


class Array2DMessage(BaseMessage):
    """A message containing a 2D array value, such as a population or a set of objectives."""

    value: list[list[float]] = Field(..., description="The array value of the message.")
    """ The array value of the message. """


class PolarsDataFrameMessage(BaseMessage):
    """A message containing a 2D array value, such as a population or a set of objectives."""

    value: DataFrame = Field(..., description="The array value of the message.")
    """ The array value of the message. """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    @field_serializer("value")
    def _serialize_value(self, value: DataFrame) -> dict[str, list[int | float]]:
        return value.to_dict(as_series=False)


class GenericMessage(BaseMessage):
    """A message containing a generic value."""

    value: Any = Field(..., description="The generic value of the message.")
    """ The generic value of the message. """


Message = (
    IntMessage
    | FloatMessage
    | DictMessage
    | Array2DMessage
    | GenericMessage
    | StringMessage
    | BoolMessage
    | PolarsDataFrameMessage
)

AllowedMessagesAtVerbosity: dict[int, tuple[type[Message], ...]] = {
    0: (),
    1: (IntMessage, FloatMessage, StringMessage, BoolMessage),
    2: (
        IntMessage,
        FloatMessage,
        StringMessage,
        BoolMessage,
        DictMessage,
        Array2DMessage,
        GenericMessage,
        PolarsDataFrameMessage,
    ),
}
```
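As a quick illustration of the messaging protocol above (this example is not part of the package diff; the operator name "TestCrossover" and the values are made up), an operator could publish a scalar message and a DataFrame message like so:

```python
import polars as pl

from desdeo.tools.message import (
    CrossoverMessageTopics,
    FloatMessage,
    PolarsDataFrameMessage,
)

# A scalar message: the current crossover probability, published by a
# hypothetical crossover operator called "TestCrossover".
probability_msg = FloatMessage(
    topic=CrossoverMessageTopics.XOVER_PROBABILITY,
    source="TestCrossover",
    value=0.9,
)

# A DataFrame message: offspring produced by the crossover. The
# field_serializer defined in PolarsDataFrameMessage turns the DataFrame
# into a plain dict of column lists when the message is serialized.
offspring_msg = PolarsDataFrameMessage(
    topic=CrossoverMessageTopics.OFFSPRINGS,
    source="TestCrossover",
    value=pl.DataFrame({"x_1": [0.1, 0.2], "x_2": [0.3, 0.4]}),
)

print(probability_msg.model_dump())
print(offspring_msg.model_dump())
```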
desdeo/tools/ng_solver_interfaces.py
ADDED

@@ -0,0 +1,199 @@

```python
"""Solver interfaces to the optimization routines found in nevergrad.

For more info, see https://facebookresearch.github.io/nevergrad/index.html
"""

from concurrent.futures import ThreadPoolExecutor
from typing import Literal

import nevergrad as ng
from pydantic import BaseModel, Field

from desdeo.problem import Problem, SympyEvaluator
from desdeo.tools.generics import BaseSolver, SolverResults

available_nevergrad_optimizers = [
    "NGOpt",
    "TwoPointsDE",
    "PortfolioDiscreteOnePlusOne",
    "OnePlusOne",
    "CMA",
    "TBPSA",
    "PSO",
    "ScrHammersleySearchPlusMiddlePoint",
    "RandomSearch",
]


class NevergradGenericOptions(BaseModel):
    """Defines options to be passed to nevergrad's optimization routines."""

    budget: int = Field(description="The maximum number of allowed function evaluations.", default=100)
    """The maximum number of allowed function evaluations. Defaults to 100."""

    num_workers: int = Field(description="The maximum number of allowed parallel evaluations.", default=1)
    """The maximum number of allowed parallel evaluations. This is currently
    used to define the batch size when evaluating problems. Defaults to 1."""

    optimizer: Literal[*available_nevergrad_optimizers] = Field(
        description=(
            "The optimizer to be used. Must be one of `NGOpt`, `TwoPointsDE`, `PortfolioDiscreteOnePlusOne`, "
            "`OnePlusOne`, `CMA`, `TBPSA`, `PSO`, `ScrHammersleySearchPlusMiddlePoint`, or `RandomSearch`. "
            "Defaults to `NGOpt`."
        ),
        default="NGOpt",
    )
    """The optimizer to be used. Must be one of `NGOpt`, `TwoPointsDE`, `PortfolioDiscreteOnePlusOne`,
    `OnePlusOne`, `CMA`, `TBPSA`, `PSO`, `ScrHammersleySearchPlusMiddlePoint`, or `RandomSearch`.
    Defaults to `NGOpt`."""


_default_nevergrad_generic_options = NevergradGenericOptions()
"""The set of default options for nevergrad's NgOpt optimizer."""


def parse_ng_results(results: dict, problem: Problem, evaluator: SympyEvaluator) -> SolverResults:
    """Parses the optimization results returned by nevergrad solvers.

    Args:
        results (dict): the results. A dict with at least the keys
            `recommendation`, which points to a parametrization returned by
            nevergrad solvers, `message` with information about the optimization,
            and `success` indicating whether a recommendation was found successfully
            or not.
        problem (Problem): the problem the results belong to.
        evaluator (SympyEvaluator): the evaluator used to evaluate the problem.

    Returns:
        SolverResults: a pydantic dataclass with the relevant optimization results.
    """
    optimal_variables = results["recommendation"].value
    success = results["success"]
    msg = results["message"]

    results = evaluator.evaluate(optimal_variables)

    optimal_objectives = {obj.symbol: results[obj.symbol] for obj in problem.objectives}

    constraint_values = (
        {con.symbol: results[con.symbol] for con in problem.constraints} if problem.constraints is not None else None
    )
    extra_func_values = (
        {extra.symbol: results[extra.symbol] for extra in problem.extra_funcs}
        if problem.extra_funcs is not None
        else None
    )
    scalarization_values = (
        {scal.symbol: results[scal.symbol] for scal in problem.scalarization_funcs}
        if problem.scalarization_funcs is not None
        else None
    )

    return SolverResults(
        optimal_variables=optimal_variables,
        optimal_objectives=optimal_objectives,
        constraint_values=constraint_values,
        extra_func_values=extra_func_values,
        scalarization_values=scalarization_values,
        success=success,
        message=msg,
    )


class NevergradGenericSolver(BaseSolver):
    """Creates a solver that utilizes optimization routines found in the nevergrad library."""

    def __init__(self, problem: Problem, options: NevergradGenericOptions | None = _default_nevergrad_generic_options):
        """Creates a solver that utilizes optimization routines found in the nevergrad library.

        These solvers are best utilized for black-box, gradient free optimization with
        computationally expensive function calls. Utilizing multiple workers is recommended
        (see `NevergradGenericOptions`) when function calls are heavily I/O bound.

        See https://facebookresearch.github.io/nevergrad/getting_started.html for further information
        on nevergrad and its solvers.

        References:
            Rapin, J., & Teytaud, O. (2018). Nevergrad - A gradient-free
            optimization platform. GitHub.
            https://GitHub.com/FacebookResearch/Nevergrad

        Args:
            problem (Problem): the problem to be solved.
            options (NevergradGenericOptions | None): options to be passed to the solver.
                If None, `_default_nevergrad_generic_options` are used. Defaults to
                `_default_nevergrad_generic_options`.
        """
        self.problem = problem
        self.options = options if options is not None else _default_nevergrad_generic_options
        self.evaluator = SympyEvaluator(problem)

    def solve(self, target: str) -> SolverResults:
        """Solve the problem for the given target.

        Args:
            target (str): the symbol of the objective function to be optimized.

        Returns:
            SolverResults: the results of the optimization.
        """
        parametrization = ng.p.Dict(
            **{
                var.symbol: ng.p.Scalar(
                    # sets the initial value of the variables, if None, then the
                    # mid-point of the lower and upper bounds is chosen as the
                    # initial value.
                    init=var.initial_value if var.initial_value is not None else (var.lowerbound + var.upperbound) / 2
                ).set_bounds(var.lowerbound, var.upperbound)
                for var in self.problem.variables
            }
        )

        optimizer = ng.optimizers.registry[self.options.optimizer](
            parametrization=parametrization, **self.options.model_dump(exclude="optimizer")
        )

        constraint_symbols = (
            None if self.problem.constraints is None else [con.symbol for con in self.problem.constraints]
        )

        try:
            if optimizer.num_workers == 1:
                # single thread
                recommendation = optimizer.minimize(
                    lambda xs, t=target: self.evaluator.evaluate_target(xs, t),
                    constraint_violation=[
                        lambda xs, t=con_t: self.evaluator.evaluate_target(xs, t) for con_t in constraint_symbols
                    ]
                    if constraint_symbols is not None
                    else None,
                )

            elif optimizer.num_workers > 1:
                # multiple workers, evaluated in a thread pool
                with ThreadPoolExecutor(max_workers=optimizer.num_workers) as executor:
                    recommendation = optimizer.minimize(
                        lambda xs, t=target: self.evaluator.evaluate_target(xs, t),
                        constraint_violation=[
                            lambda xs, t=con_t: self.evaluator.evaluate_target(xs, t) for con_t in constraint_symbols
                        ]
                        if constraint_symbols is not None
                        else None,
                        executor=executor,
                        batch_mode=False,
                    )

            msg = f"Recommendation found by {self.options.optimizer}."
            success = True

        except Exception as e:
            msg = f"{self.options.optimizer} failed. Possible reason: {e}"
            success = False

        result = {"recommendation": recommendation, "message": msg, "success": success}

        return parse_ng_results(result, self.problem, self.evaluator)
```
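A usage sketch for the solver above (not part of the diff): the `Problem` construction is omitted here, and the objective symbol `"f_1"` is a placeholder for whatever symbols the problem actually defines.

```python
from desdeo.problem import Problem  # Problem schema, defined in desdeo/problem/schema.py
from desdeo.tools.ng_solver_interfaces import (
    NevergradGenericOptions,
    NevergradGenericSolver,
)

# `problem` stands in for a Problem instance built elsewhere, e.g. one of the
# test problems under desdeo/problem/testproblems/; its construction is not
# part of this file and is intentionally left out.
problem: Problem = ...

# Pick a non-default optimizer and a larger evaluation budget.
options = NevergradGenericOptions(budget=500, optimizer="CMA")
solver = NevergradGenericSolver(problem, options=options)

# "f_1" is a placeholder for an objective or scalarization symbol in `problem`.
results = solver.solve("f_1")
print(results.success, results.message)
```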
desdeo/tools/non_dominated_sorting.py
ADDED

@@ -0,0 +1,133 @@

```python
"""This module contains functions for non-dominated sorting of solutions."""

import numpy as np
from numba import njit  # type: ignore


@njit()
def dominates(x: np.ndarray, y: np.ndarray) -> bool:
    """Returns true if x dominates y.

    Args:
        x (np.ndarray): First solution. Should be a 1-D array of numerics.
        y (np.ndarray): Second solution. Should be the same shape as x.

    Returns:
        bool: True if x dominates y, false otherwise.
    """
    dom = False
    for i in range(len(x)):
        if x[i] > y[i]:
            return False
        elif x[i] < y[i]:
            dom = True
    return dom


@njit()
def non_dominated(data: np.ndarray) -> np.ndarray:
    """Finds the non-dominated front from a population of solutions.

    Args:
        data (np.ndarray): 2-D array of solutions, with each row being a single solution.

    Returns:
        np.ndarray: Boolean array of same length as number of solutions (rows). The value is
            true if corresponding solution is non-dominated. False otherwise.
    """
    num_solutions = len(data)
    index = np.zeros(num_solutions, dtype=np.bool_)
    index[0] = True
    for i in range(1, num_solutions):
        index[i] = True
        for j in range(i):
            if not index[j]:
                continue
            if dominates(data[i], data[j]):
                index[j] = False
            elif dominates(data[j], data[i]):
                index[i] = False
                break
    return index


@njit()
def fast_non_dominated_sort(data: np.ndarray) -> np.ndarray:
    """Conduct fast non-dominated sorting on a population of solutions.

    Args:
        data (np.ndarray): 2-D array of solutions, with each row being a single solution.

    Returns:
        np.ndarray: n x f boolean array. n is the number of solutions, f is the number of fronts.
            The value of an array element is true if the corresponding solution id (column) belongs in
            the corresponding front (row).
    """
    num_solutions = len(data)
    indices = np.arange(num_solutions)
    taken = np.zeros(num_solutions, dtype=np.bool_)
    fronts = np.zeros((num_solutions, num_solutions), dtype=np.bool_)

    for i in indices:
        current_front = non_dominated(data[~taken])

        current_front_all = np.zeros(num_solutions, dtype=np.bool_)
        current_front_all[~taken] = current_front
        fronts[i] = current_front_all

        taken = taken + fronts[i]
        if not fronts[i].any():
            # if the current front is empty or if all the solutions have been sorted, stop
            break
    return fronts[:i]


def fast_non_dominated_sort_indices(data: np.ndarray) -> list[np.ndarray]:
    """Conduct fast non-dominated sorting on a population of solutions.

    This function returns identical results as `fast_non_dominated_sort`, but in a different format.
    This function returns an array of solution indices for each front, packed in a list.

    Args:
        data (np.ndarray): 2-D array of solutions, with each row being a single solution.

    Returns:
        list[np.ndarray]: A list with f elements where f is the number of fronts in the data,
            arranged in ascending order. Each element is a numpy array of the indices of solutions
            belonging to the corresponding front.
    """
    fronts = fast_non_dominated_sort(data)
    return [np.where(fronts[i])[0] for i in range(len(fronts))]


@njit()
def non_dominated_merge(set1: np.ndarray, set2: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Merge two sets of non-dominated solutions.

    This is a slightly more efficient way to merge two sets of solutions such that the resulting
    set only contains non-dominated solutions from the two sets. This function assumes that the
    two sets already only contain non-dominated solutions. I.e., each solution in each set is non-dominated
    with respect to all other solutions in the same set. However, the solutions in the two sets may not be
    non-dominated with respect to each other.

    Args:
        set1 (np.ndarray): 2-D array of solutions, with each row being a single solution.
        set2 (np.ndarray): 2-D array of solutions, with each row being a single solution.

    Returns:
        tuple[np.ndarray, np.ndarray]: A tuple of two mask arrays. The first mask array is for set1 and the
            second mask array is for set2. The value of an element in the mask array is True if the corresponding
            solution is non-dominated in the merged set. False otherwise.
    """
    # Masks to keep track of which solutions are non-dominated. Default is all True.
    set1_mask = np.ones(len(set1), dtype=np.bool_)
    set2_mask = np.ones(len(set2), dtype=np.bool_)

    for i in range(len(set1)):
        for j in range(len(set2)):
            if dominates(set1[i], set2[j]):
                set2_mask[j] = False
            elif dominates(set2[j], set1[i]):
                set1_mask[i] = False

    return set1_mask, set2_mask
```