desdeo 1.1.3-py3-none-any.whl → 2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- desdeo/__init__.py +8 -8
- desdeo/api/README.md +73 -0
- desdeo/api/__init__.py +15 -0
- desdeo/api/app.py +40 -0
- desdeo/api/config.py +69 -0
- desdeo/api/config.toml +53 -0
- desdeo/api/db.py +25 -0
- desdeo/api/db_init.py +79 -0
- desdeo/api/db_models.py +164 -0
- desdeo/api/malaga_db_init.py +27 -0
- desdeo/api/models/__init__.py +66 -0
- desdeo/api/models/archive.py +34 -0
- desdeo/api/models/preference.py +90 -0
- desdeo/api/models/problem.py +507 -0
- desdeo/api/models/reference_point_method.py +18 -0
- desdeo/api/models/session.py +46 -0
- desdeo/api/models/state.py +96 -0
- desdeo/api/models/user.py +51 -0
- desdeo/api/routers/_NAUTILUS.py +245 -0
- desdeo/api/routers/_NAUTILUS_navigator.py +233 -0
- desdeo/api/routers/_NIMBUS.py +762 -0
- desdeo/api/routers/__init__.py +5 -0
- desdeo/api/routers/problem.py +110 -0
- desdeo/api/routers/reference_point_method.py +117 -0
- desdeo/api/routers/session.py +76 -0
- desdeo/api/routers/test.py +16 -0
- desdeo/api/routers/user_authentication.py +366 -0
- desdeo/api/schema.py +94 -0
- desdeo/api/tests/__init__.py +0 -0
- desdeo/api/tests/conftest.py +59 -0
- desdeo/api/tests/test_models.py +701 -0
- desdeo/api/tests/test_routes.py +216 -0
- desdeo/api/utils/database.py +274 -0
- desdeo/api/utils/logger.py +29 -0
- desdeo/core.py +27 -0
- desdeo/emo/__init__.py +29 -0
- desdeo/emo/hooks/archivers.py +172 -0
- desdeo/emo/methods/EAs.py +418 -0
- desdeo/emo/methods/__init__.py +0 -0
- desdeo/emo/methods/bases.py +59 -0
- desdeo/emo/operators/__init__.py +1 -0
- desdeo/emo/operators/crossover.py +780 -0
- desdeo/emo/operators/evaluator.py +118 -0
- desdeo/emo/operators/generator.py +356 -0
- desdeo/emo/operators/mutation.py +1053 -0
- desdeo/emo/operators/selection.py +1036 -0
- desdeo/emo/operators/termination.py +178 -0
- desdeo/explanations/__init__.py +6 -0
- desdeo/explanations/explainer.py +100 -0
- desdeo/explanations/utils.py +90 -0
- desdeo/mcdm/__init__.py +19 -0
- desdeo/mcdm/nautili.py +345 -0
- desdeo/mcdm/nautilus.py +477 -0
- desdeo/mcdm/nautilus_navigator.py +655 -0
- desdeo/mcdm/nimbus.py +417 -0
- desdeo/mcdm/pareto_navigator.py +269 -0
- desdeo/mcdm/reference_point_method.py +116 -0
- desdeo/problem/__init__.py +79 -0
- desdeo/problem/evaluator.py +561 -0
- desdeo/problem/gurobipy_evaluator.py +562 -0
- desdeo/problem/infix_parser.py +341 -0
- desdeo/problem/json_parser.py +944 -0
- desdeo/problem/pyomo_evaluator.py +468 -0
- desdeo/problem/schema.py +1808 -0
- desdeo/problem/simulator_evaluator.py +298 -0
- desdeo/problem/sympy_evaluator.py +244 -0
- desdeo/problem/testproblems/__init__.py +73 -0
- desdeo/problem/testproblems/binh_and_korn_problem.py +88 -0
- desdeo/problem/testproblems/dtlz2_problem.py +102 -0
- desdeo/problem/testproblems/forest_problem.py +275 -0
- desdeo/problem/testproblems/knapsack_problem.py +163 -0
- desdeo/problem/testproblems/mcwb_problem.py +831 -0
- desdeo/problem/testproblems/mixed_variable_dimenrions_problem.py +83 -0
- desdeo/problem/testproblems/momip_problem.py +172 -0
- desdeo/problem/testproblems/nimbus_problem.py +143 -0
- desdeo/problem/testproblems/pareto_navigator_problem.py +89 -0
- desdeo/problem/testproblems/re_problem.py +492 -0
- desdeo/problem/testproblems/river_pollution_problem.py +434 -0
- desdeo/problem/testproblems/rocket_injector_design_problem.py +140 -0
- desdeo/problem/testproblems/simple_problem.py +351 -0
- desdeo/problem/testproblems/simulator_problem.py +92 -0
- desdeo/problem/testproblems/spanish_sustainability_problem.py +945 -0
- desdeo/problem/testproblems/zdt_problem.py +271 -0
- desdeo/problem/utils.py +245 -0
- desdeo/tools/GenerateReferencePoints.py +181 -0
- desdeo/tools/__init__.py +102 -0
- desdeo/tools/generics.py +145 -0
- desdeo/tools/gurobipy_solver_interfaces.py +258 -0
- desdeo/tools/indicators_binary.py +11 -0
- desdeo/tools/indicators_unary.py +375 -0
- desdeo/tools/interaction_schema.py +38 -0
- desdeo/tools/intersection.py +54 -0
- desdeo/tools/iterative_pareto_representer.py +99 -0
- desdeo/tools/message.py +234 -0
- desdeo/tools/ng_solver_interfaces.py +199 -0
- desdeo/tools/non_dominated_sorting.py +133 -0
- desdeo/tools/patterns.py +281 -0
- desdeo/tools/proximal_solver.py +99 -0
- desdeo/tools/pyomo_solver_interfaces.py +464 -0
- desdeo/tools/reference_vectors.py +462 -0
- desdeo/tools/scalarization.py +3138 -0
- desdeo/tools/scipy_solver_interfaces.py +454 -0
- desdeo/tools/score_bands.py +464 -0
- desdeo/tools/utils.py +320 -0
- desdeo/utopia_stuff/__init__.py +0 -0
- desdeo/utopia_stuff/data/1.json +15 -0
- desdeo/utopia_stuff/data/2.json +13 -0
- desdeo/utopia_stuff/data/3.json +15 -0
- desdeo/utopia_stuff/data/4.json +17 -0
- desdeo/utopia_stuff/data/5.json +15 -0
- desdeo/utopia_stuff/from_json.py +40 -0
- desdeo/utopia_stuff/reinit_user.py +38 -0
- desdeo/utopia_stuff/utopia_db_init.py +212 -0
- desdeo/utopia_stuff/utopia_problem.py +403 -0
- desdeo/utopia_stuff/utopia_problem_old.py +415 -0
- desdeo/utopia_stuff/utopia_reference_solutions.py +79 -0
- desdeo-2.0.0.dist-info/LICENSE +21 -0
- desdeo-2.0.0.dist-info/METADATA +168 -0
- desdeo-2.0.0.dist-info/RECORD +120 -0
- {desdeo-1.1.3.dist-info → desdeo-2.0.0.dist-info}/WHEEL +1 -1
- desdeo-1.1.3.dist-info/METADATA +0 -18
- desdeo-1.1.3.dist-info/RECORD +0 -4
+++ desdeo/problem/simulator_evaluator.py
@@ -0,0 +1,298 @@
"""Evaluators are defined to evaluate simulator based and surrogate based objectives, constraints and extras."""

import json
import subprocess
import sys
from inspect import getfullargspec
from pathlib import Path

import joblib
import numpy as np
import polars as pl
# import skops.io as sio

from desdeo.problem import (
    ObjectiveTypeEnum,
    PolarsEvaluator,
    PolarsEvaluatorModesEnum,
    Problem,
)


class EvaluatorError(Exception):
    """Error raised when exceptions are encountered in an Evaluator."""


class Evaluator:
    """A class for creating evaluators for simulator based and surrogate based objectives, constraints and extras."""

    def __init__(
        self, problem: Problem, params: dict[str, dict] | None = None, surrogate_paths: dict[str, Path] | None = None
    ):
        """Creating an evaluator for simulator based and surrogate based objectives, constraints and extras.

        Args:
            problem (Problem): The problem as a pydantic 'Problem' data class.
            params (dict[str, dict], optional): Parameters for the different simulators used in the problem.
                Given as a dict with the simulators' symbols as keys and the corresponding simulator parameters
                as a dict as values. Defaults to None.
            surrogate_paths (dict[str, Path], optional): A dictionary where the keys are the names of the objectives,
                constraints and extra functions and the values are the paths to the surrogate models saved on disk.
                The names of the objectives, constraints and extra functions should match the names of the objectives,
                constraints and extra functions in the problem JSON. Defaults to None.
        """
        self.problem = problem
        # store the symbol and min or max multiplier as well (symbol, min/max multiplier [1 | -1])
        self.objective_mix_max_mult = [
            (objective.symbol, -1 if objective.maximize else 1) for objective in problem.objectives
        ]
        # Gather the symbols of objectives of different types into their own lists
        self.analytical_symbols = [
            obj.symbol
            for obj in list(filter(lambda x: x.objective_type == ObjectiveTypeEnum.analytical, problem.objectives))
        ]
        self.data_based_symbols = [
            obj.symbol for obj in problem.objectives if obj.objective_type == ObjectiveTypeEnum.data_based
        ]
        self.simulator_symbols = [
            obj.symbol
            for obj in list(filter(lambda x: x.objective_type == ObjectiveTypeEnum.simulator, problem.objectives))
        ]
        self.surrogate_symbols = [
            obj.symbol
            for obj in list(filter(lambda x: x.objective_type == ObjectiveTypeEnum.surrogate, problem.objectives))
        ]
        # Gather any constraints' symbols
        if problem.constraints is not None:
            self.analytical_symbols = self.analytical_symbols + [
                con.symbol for con in list(filter(lambda x: x.func is not None, problem.constraints))
            ]
            self.simulator_symbols = self.simulator_symbols + [
                con.symbol for con in list(filter(lambda x: x.simulator_path is not None, problem.constraints))
            ]
            self.surrogate_symbols = self.surrogate_symbols + [
                con.symbol for con in list(filter(lambda x: x.surrogates is not None, problem.constraints))
            ]

        # Gather any extra functions' symbols
        if problem.extra_funcs is not None:
            self.analytical_symbols = self.analytical_symbols + [
                extra.symbol for extra in list(filter(lambda x: x.func is not None, problem.extra_funcs))
            ]
            self.simulator_symbols = self.simulator_symbols + [
                extra.symbol for extra in list(filter(lambda x: x.simulator_path is not None, problem.extra_funcs))
            ]
            self.surrogate_symbols = self.surrogate_symbols + [
                extra.symbol for extra in list(filter(lambda x: x.surrogates is not None, problem.extra_funcs))
            ]

        # Gather all the symbols of objectives, constraints and extra functions
        self.problem_symbols = (
            self.analytical_symbols + self.data_based_symbols + self.simulator_symbols + self.surrogate_symbols
        )

        # Gather the possible simulators
        self.simulators = problem.simulators if problem.simulators is not None else []
        # Gather the possibly given parameters
        self.params = {}
        for sim in self.simulators:
            sim_params = params.get(sim.name, {}) if params is not None else {}
            if sim.parameter_options is not None:
                for key in sim.parameter_options:
                    sim_params[key] = sim.parameter_options[key]
            self.params[sim.name] = sim_params

        self.surrogates = {}
        if surrogate_paths is not None:
            self._load_surrogates(surrogate_paths)
        else:
            self._load_surrogates()

        if len(self.surrogate_symbols) > 0:
            missing_surrogates = []
            for symbol in self.surrogate_symbols:
                if symbol not in self.surrogates:
                    missing_surrogates.append(symbol)

            if len(missing_surrogates) > 0:
                raise EvaluatorError(f"Some surrogates missing: {missing_surrogates}.")

    def _evaluate_simulator(self, xs: dict[str, list[int | float]]) -> pl.DataFrame:
        """Evaluate the problem for the given decision variables using the problem's simulators.

        Args:
            xs (dict[str, list[int | float]]): The decision variables for which the functions are to be evaluated.
                Given as a dictionary with the decision variable symbols as keys and a list of decision variable values
                as the values. The length of the lists is the number of samples and each list should have the same
                length (same number of samples).

        Returns:
            pl.DataFrame: The objective, constraint and extra function values for the given decision variables as
                a polars dataframe. The symbols of the objectives, constraints and extra functions are the column names
                and the length of the columns is the number of samples. Will return those objective, constraint and
                extra function values that are gained from simulators listed in the problem object.
        """
        res_df = pl.DataFrame()
        for sim in self.simulators:
            # gather the possible parameters for the simulator
            params = self.params.get(sim.name, {})
            # call the simulator with the decision variable values and parameters as dicts
            res = subprocess.run(
                [sys.executable, sim.file, "-d", str(xs), "-p", str(params)], capture_output=True, text=True
            )
            if res.returncode == 0:
                # gather the simulation results (a dict) into the results dataframe
                res_df = res_df.hstack(pl.DataFrame(json.loads(res.stdout)))
            else:
                raise EvaluatorError(res.stderr)

        # Evaluate the minimization form of the objective functions
        min_obj_columns = pl.DataFrame()
        for symbol, min_max_mult in self.objective_mix_max_mult:
            if symbol in res_df.columns:
                min_obj_columns = min_obj_columns.hstack(
                    res_df.select((min_max_mult * pl.col(f"{symbol}")).alias(f"{symbol}_min"))
                )
        return res_df.hstack(min_obj_columns)

    def _evaluate_surrogates(self, xs: dict[str, list[int | float]]) -> pl.DataFrame:
        """Evaluate the problem for the given decision variables using the surrogate models.

        Args:
            xs (dict[str, list[int | float]]): The decision variables for which the functions are to be evaluated.
                Given as a dictionary with the decision variable symbols as keys and a list of decision variable values
                as the values. The length of the lists is the number of samples and each list should have the same
                length (same number of samples).

        Returns:
            pl.DataFrame: The values of the evaluated objectives, constraints and extra functions as a polars
                dataframe. The uncertainty prediction values are also returned. If a model does not provide
                uncertainty predictions, then they are set as NaN.
        """
        res = pl.DataFrame()
        var = np.array([value for _, value in xs.items()]).T  # has to be transposed (at least for sklearn models)
        for symbol in self.surrogates:
            # get a list of args accepted by the model's predict function
            accepted_args = getfullargspec(self.surrogates[symbol].predict).args
            # if "return_std" is accepted, gather the uncertainty predictions as well
            if "return_std" in accepted_args:
                value, uncertainty = self.surrogates[symbol].predict(var, return_std=True)
            # otherwise, set the uncertainties as NaN
            else:
                value = self.surrogates[symbol].predict(var)
                uncertainty = np.full(np.shape(value), np.nan)
            # add the objectives, constraints and extra functions into the polars dataframe
            # values go into columns with the symbol as the column names
            res = res.with_columns(pl.Series(value).alias(symbol))
            # uncertainties go into columns with {symbol}_uncert as the column names
            res = res.with_columns(pl.Series(uncertainty).alias(f"{symbol}_uncert"))

        # Evaluate the minimization form of the objective functions
        min_obj_columns = pl.DataFrame()
        for symbol, min_max_mult in self.objective_mix_max_mult:
            if symbol in res.columns:
                min_obj_columns = min_obj_columns.hstack(
                    res.select((min_max_mult * pl.col(f"{symbol}")).alias(f"{symbol}_min"))
                )
        return res.hstack(min_obj_columns)

    def _load_surrogates(self, surrogate_paths: dict[str, Path] | None = None):
        """Load the surrogate models from disk and store them within the evaluator.

        This is used during initialization of the evaluator or when the analyst wants to replace the current surrogate
        models with other models. However, if a new model is trained after initialization of the evaluator, the problem
        JSON should be updated with the new model paths and the evaluator should be re-initialized. This can happen
        with any solver that does model management.

        Args:
            surrogate_paths (dict[str, Path]): A dictionary where the keys are the names of the objectives, constraints
                and extra functions and the values are the paths to the surrogate models saved on disk. The names of
                the objectives should match the names of the objectives in the problem JSON. At the moment the supported
                file format is .skops (through skops.io). TODO: if skops.io is used, it should be added to pyproject.toml.
        """
        if surrogate_paths is not None:
            for symbol in surrogate_paths:
                with Path.open(f"{surrogate_paths[symbol]}", "rb") as file:
                    self.surrogates[symbol] = joblib.load(file)
                    """unknown_types = sio.get_untrusted_types(file=file)
                    if len(unknown_types) == 0:
                        self.surrogates[symbol] = sio.load(file, unknown_types)
                    else:  # TODO: if there are unknown types they should be checked
                        self.surrogates[symbol] = sio.load(file, unknown_types)
                        #raise EvaluatorError(f"Untrusted types found in the model of {obj.symbol}: {unknown_types}")"""
        else:
            # check each surrogate based objective, constraint and extra function for a surrogate path
            for obj in self.problem.objectives:
                if obj.surrogates is not None:
                    with Path.open(f"{obj.surrogates[0]}", "rb") as file:
                        self.surrogates[obj.symbol] = joblib.load(file)
                        """unknown_types = sio.get_untrusted_types(file=file)
                        if len(unknown_types) == 0:
                            self.surrogates[obj.symbol] = sio.load(file, unknown_types)
                        else:  # TODO: if there are unknown types they should be checked
                            self.surrogates[obj.symbol] = sio.load(file, unknown_types)
                            #raise EvaluatorError(f"Untrusted types found in the model of {obj.symbol}: {unknown_types}")"""
            for con in self.problem.constraints or []:  # if there are no constraints, an empty list is used
                if con.surrogates is not None:
                    with Path.open(f"{con.surrogates[0]}", "rb") as file:
                        self.surrogates[con.symbol] = joblib.load(file)
                        """unknown_types = sio.get_untrusted_types(file=file)
                        if len(unknown_types) == 0:
                            self.surrogates[con.symbol] = sio.load(file, unknown_types)
                        else:  # TODO: if there are unknown types they should be checked
                            self.surrogates[con.symbol] = sio.load(file, unknown_types)
                            #raise EvaluatorError(f"Untrusted types found in the model of {obj.symbol}: {unknown_types}")"""
            for extra in self.problem.extra_funcs or []:  # if there are no extra functions, an empty list is used
                if extra.surrogates is not None:
                    with Path.open(f"{extra.surrogates[0]}", "rb") as file:
                        self.surrogates[extra.symbol] = joblib.load(file)
                        """unknown_types = sio.get_untrusted_types(file=file)
                        if len(unknown_types) == 0:
                            self.surrogates[extra.symbol] = sio.load(file, unknown_types)
                        else:  # TODO: if there are unknown types they should be checked
                            self.surrogates[extra.symbol] = sio.load(file, unknown_types)
                            #raise EvaluatorError(f"Untrusted types found in the model of {obj.symbol}: {unknown_types}")"""

    def evaluate(self, xs: dict[str, list[int | float]], flat: bool = False) -> pl.DataFrame:
        """Evaluate the functions for the given decision variables.

        Evaluates analytical, simulation based and surrogate based functions. For now, the evaluator assumes that there
        are no data based objectives.

        Args:
            xs (dict[str, list[int | float]]): The decision variables for which the functions are to be evaluated.
                Given as a dictionary with the decision variable symbols as keys and a list of decision variable values
                as the values. The length of the lists is the number of samples and each list should have the same
                length (same number of samples).
            flat (bool, optional): whether the evaluation is done using flattened variables or not. Defaults to False.

        Returns:
            pl.DataFrame: polars dataframe with the evaluated function values.
        """
        # TODO (@gialmisi): Make work with polars dataframes as well in addition to dict.
        # See, e.g., PolarsEvaluator._polars_evaluate. Then, remove the arg `flat`.
        res = pl.DataFrame()

        # Evaluate the analytical functions
        if len(self.analytical_symbols + self.data_based_symbols) > 0:
            polars_evaluator = PolarsEvaluator(self.problem, evaluator_mode=PolarsEvaluatorModesEnum.mixed)
            analytical_values = (
                polars_evaluator._polars_evaluate(xs) if not flat else polars_evaluator._polars_evaluate_flat(xs)
            )
            res = res.hstack(analytical_values)

        # Evaluate the simulator based functions
        if len(self.simulator_symbols) > 0:
            simulator_values = self._evaluate_simulator(xs)
            res = res.hstack(simulator_values)

        # Evaluate the surrogate based functions
        if len(self.surrogate_symbols) > 0:
            surrogate_values = self._evaluate_surrogates(xs)
            res = res.hstack(surrogate_values)

        # Check that everything is evaluated
        for symbol in self.problem_symbols:
            if symbol not in res.columns:
                raise EvaluatorError(f"{symbol} not evaluated.")
        return res
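
The subprocess contract used by _evaluate_simulator above is: the simulator script is run as "python <sim.file> -d <str(xs)> -p <str(params)>", and whatever it prints to stdout is parsed with json.loads into columns of the result dataframe. Below is a minimal sketch of a compatible simulator script; it is not part of the package, and the symbols x_1, x_2, f_1 and the delta parameter are made up for illustration.

# sketch_simulator.py -- an illustrative simulator script, not shipped with desdeo
import argparse
import ast
import json

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", dest="variables", type=str)
    parser.add_argument("-p", dest="parameters", type=str)
    args = parser.parse_args()

    # The Evaluator passes both dicts through str(), i.e., as Python literals,
    # so they are parsed back with ast.literal_eval rather than json.loads.
    xs = ast.literal_eval(args.variables)       # e.g. {"x_1": [0.5, 1.0], "x_2": [2.0, 3.0]}
    params = ast.literal_eval(args.parameters)  # e.g. {"delta": 0.1}

    # Hypothetical "simulation": one output symbol f_1, one value per sample.
    f_1 = [x1 + params.get("delta", 0.0) * x2 for x1, x2 in zip(xs["x_1"], xs["x_2"])]

    # The printed JSON object becomes one dataframe column per output symbol.
    print(json.dumps({"f_1": f_1}))

If f_1 were a simulator based objective of the problem, _evaluate_simulator would additionally append the minimized column f_1_min, as shown in the code above.
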
+++ desdeo/problem/sympy_evaluator.py
@@ -0,0 +1,244 @@
"""Implements an evaluator based on sympy expressions."""

from copy import deepcopy

import sympy as sp

from desdeo.problem.evaluator import variable_dimension_enumerate
from desdeo.problem.json_parser import FormatEnum, MathParser
from desdeo.problem.schema import Problem

SUPPORTED_VAR_DIMENSIONS = ["scalar"]


class SympyEvaluatorError(Exception):
    """Raised when an exception with a Sympy evaluator is encountered."""


class SympyEvaluator:
    """Defines an evaluator that can be used to evaluate instances of Problem utilizing sympy."""

    def __init__(self, problem: Problem):
        """Initializes the evaluator.

        Args:
            problem (Problem): the problem to be evaluated.
        """
        if variable_dimension_enumerate(problem) not in SUPPORTED_VAR_DIMENSIONS:
            msg = "SymPy evaluator does not yet support tensors."
            raise SympyEvaluatorError(msg)

        # Collect all the symbols and expressions in the problem
        parser = MathParser(to_format=FormatEnum.sympy)

        self.variable_symbols = [var.symbol for var in problem.variables]
        self.constant_expressions = (
            {const.symbol: parser.parse(const.value) for const in problem.constants}
            if problem.constants is not None
            else None
        )

        self.extra_expressions = (
            {extra.symbol: parser.parse(extra.func) for extra in problem.extra_funcs}
            if problem.extra_funcs is not None
            else None
        )

        self.objective_expressions = {obj.symbol: parser.parse(obj.func) for obj in problem.objectives}

        self.constraint_expressions = (
            {con.symbol: parser.parse(con.func) for con in problem.constraints}
            if problem.constraints is not None
            else None
        )

        self.scalarization_expressions = (
            {scal.symbol: parser.parse(scal.func) for scal in problem.scalarization_funcs}
            if problem.scalarization_funcs is not None
            else None
        )

        # replace symbols and create lambda functions ready to be called
        # replace constants in extra functions, if they exist
        if self.extra_expressions is not None:
            _extra_expressions = (
                {
                    k: self.extra_expressions[k].subs(self.constant_expressions, evaluate=False)
                    for k in self.extra_expressions
                }
                if self.constant_expressions is not None
                else deepcopy(self.extra_expressions)
            )
        else:
            _extra_expressions = None

        # replace constants in objective functions, if constants have been defined
        _objective_expressions = (
            {
                k: self.objective_expressions[k].subs(self.constant_expressions, evaluate=False)
                for k in self.objective_expressions
            }
            if self.constant_expressions is not None
            else deepcopy(self.objective_expressions)
        )

        # replace extra functions in objective functions, if extra functions have been defined
        _objective_expressions = (
            (
                {
                    k: _objective_expressions[k].subs(self.extra_expressions, evaluate=False)
                    for k in _objective_expressions
                }
            )
            if self.extra_expressions is not None
            else _objective_expressions
        )

        # always minimized objective expressions
        _objective_expressions_min = {
            f"{obj.symbol}_min": -_objective_expressions[obj.symbol]
            if obj.maximize
            else _objective_expressions[obj.symbol]
            for obj in problem.objectives
        }

        # replace stuff in the constraint expressions if any are defined
        if self.constraint_expressions is not None:
            # replace constants
            _constraint_expressions = (
                {
                    k: self.constraint_expressions[k].subs(self.constant_expressions, evaluate=False)
                    for k in self.constraint_expressions
                }
                if self.constant_expressions is not None
                else deepcopy(self.constraint_expressions)
            )

            # replace extra functions
            _constraint_expressions = (
                {
                    k: _constraint_expressions[k].subs(_extra_expressions, evaluate=False)
                    for k in _constraint_expressions
                }
                if _extra_expressions is not None
                else _constraint_expressions
            )

            # replace objective functions
            _constraint_expressions = {
                k: _constraint_expressions[k].subs(_objective_expressions, evaluate=False)
                for k in _constraint_expressions
            }
            _constraint_expressions = {
                k: _constraint_expressions[k].subs(_objective_expressions_min, evaluate=False)
                for k in _constraint_expressions
            }

        else:
            _constraint_expressions = None

        # replace stuff in scalarization expressions if any are defined
        if self.scalarization_expressions is not None:
            # replace constants
            _scalarization_expressions = (
                {
                    k: self.scalarization_expressions[k].subs(self.constant_expressions, evaluate=False)
                    for k in self.scalarization_expressions
                }
                if self.constant_expressions is not None
                else deepcopy(self.scalarization_expressions)
            )

            # replace extra functions
            _scalarization_expressions = (
                {
                    k: _scalarization_expressions[k].subs(_extra_expressions, evaluate=False)
                    for k in _scalarization_expressions
                }
                if _extra_expressions is not None
                else _scalarization_expressions
            )

            # replace constraints
            _scalarization_expressions = (
                {
                    k: _scalarization_expressions[k].subs(_constraint_expressions, evaluate=False)
                    for k in _scalarization_expressions
                }
                if _constraint_expressions is not None
                else _scalarization_expressions
            )

            # replace objectives
            _scalarization_expressions = {
                k: _scalarization_expressions[k].subs(_objective_expressions, evaluate=False)
                for k in _scalarization_expressions
            }

            _scalarization_expressions = {
                k: _scalarization_expressions[k].subs(_objective_expressions_min, evaluate=False)
                for k in _scalarization_expressions
            }

        else:
            _scalarization_expressions = None

        # initialize callable lambdas
        self.lambda_exprs = {
            _k: _v
            for _d in [
                {k: sp.lambdify(self.variable_symbols, d[k]) for k in d}
                for d in [
                    _extra_expressions,
                    _objective_expressions,
                    _objective_expressions_min,
                    _constraint_expressions,
                    _scalarization_expressions,
                ]
                if d is not None
            ]
            for _k, _v in _d.items()
        }

        self.problem = problem
        self.parser = parser

    def evaluate(self, xs: dict[str, float | int | bool]) -> dict[str, float | int | bool]:
        """Evaluate the whole problem with a given decision variable dict.

        Args:
            xs (dict[str, float | int | bool]): a dict with keys representing decision variable
                symbols and values with the decision variable value.

        Returns:
            dict[str, float | int | bool]: a dict with keys corresponding to each symbol
                defined for the problem being evaluated and the corresponding expression's
                value.
        """
        return {k: self.lambda_exprs[k](**xs) for k in self.lambda_exprs} | xs

    def evaluate_target(self, xs: dict[str, float | int | bool], target: str) -> float:
        """Evaluates only the specified target with given decision variables.

        Args:
            xs (dict[str, float | int | bool]): a dict with keys representing decision variable
                symbols and values with the decision variable value.
            target (str): the symbol of the function expression to be evaluated.

        Returns:
            float: the value of the target once evaluated.
        """
        return self.lambda_exprs[target](**xs)

    def evaluate_constraints(self, xs: dict[str, float | int | bool]) -> dict[str, float | int | bool]:
        """Evaluates the constraints of the problem with given decision variables.

        Args:
            xs (dict[str, float | int | bool]): a dict with keys representing decision variable
                symbols and values with the decision variable value.

        Returns:
            dict[str, float | int | bool]: a dict with keys being the constraints' symbols
                and values being the value of the corresponding constraint.
        """
        return {k: self.lambda_exprs[k](**xs) for k in [constr.symbol for constr in self.problem.constraints]}
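
The constructor above chains sympy substitutions (constants into extra functions, constants and extras into objectives, objectives and their minimized forms into constraints and scalarizations) and then lambdifies every expression over the variable symbols. The standalone snippet below, written in plain sympy without any desdeo API, illustrates that idea for a single maximized objective with one constant; the symbols x_1, x_2, c and f_1 are made up for illustration.

import sympy as sp

x_1, x_2 = sp.symbols("x_1 x_2")
c = sp.Symbol("c")

# objective f_1 = c*(x_1 + x_2), to be maximized, with the constant c = 2
f_1 = c * (x_1 + x_2)
f_1 = f_1.subs({c: sp.Integer(2)})  # substitute constants, as in __init__ above

# maximized objectives are stored negated under "<symbol>_min"
f_1_min = -f_1

# lambdify over the variable symbols, mirroring self.lambda_exprs
evaluate_f_1_min = sp.lambdify([x_1, x_2], f_1_min)

# roughly what evaluate_target({"x_1": 1.0, "x_2": 2.0}, "f_1_min") computes
print(evaluate_f_1_min(x_1=1.0, x_2=2.0))  # -6.0
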
+++ desdeo/problem/testproblems/__init__.py
@@ -0,0 +1,73 @@
"""Pre-defined multiobjective optimization problems.

Pre-defined problems for, e.g.,
testing and illustration purposes are defined here.
"""

__all__ = [
    "binh_and_korn",
    "dtlz2",
    "forest_problem",
    "forest_problem_discrete",
    "simple_knapsack",
    "simple_knapsack_vectors",
    "momip_ti2",
    "momip_ti7",
    "nimbus_test_problem",
    "pareto_navigator_test_problem",
    "re21",
    "re22",
    "re23",
    "re24",
    "river_pollution_problem",
    "river_pollution_problem_discrete",
    "river_pollution_scenario",
    "simple_data_problem",
    "simple_integer_test_problem",
    "simple_linear_test_problem",
    "simple_scenario_test_problem",
    "simple_test_problem",
    "simulator_problem",
    "spanish_sustainability_problem",
    "spanish_sustainability_problem_discrete",
    "zdt1",
    "zdt2",
    "zdt3",
    "rocket_injector_design",
    "mixed_variable_dimensions_problem",
    "mcwb_solid_rectangular_problem",
]


from .binh_and_korn_problem import binh_and_korn
from .dtlz2_problem import dtlz2
from .forest_problem import forest_problem, forest_problem_discrete
from .knapsack_problem import simple_knapsack, simple_knapsack_vectors
from .mixed_variable_dimenrions_problem import mixed_variable_dimensions_problem
from .momip_problem import momip_ti2, momip_ti7
from .nimbus_problem import nimbus_test_problem
from .pareto_navigator_problem import pareto_navigator_test_problem
from .re_problem import re21, re22, re23, re24
from .river_pollution_problem import (
    river_pollution_problem,
    river_pollution_problem_discrete,
    river_pollution_scenario,
)
from .rocket_injector_design_problem import rocket_injector_design
from .simple_problem import (
    simple_data_problem,
    simple_integer_test_problem,
    simple_linear_test_problem,
    simple_scenario_test_problem,
    simple_test_problem,
)
from .simulator_problem import simulator_problem
from .spanish_sustainability_problem import (
    spanish_sustainability_problem,
    spanish_sustainability_problem_discrete,
)
from .zdt_problem import zdt1, zdt2, zdt3

from .mcwb_problem import (mcwb_solid_rectangular_problem, mcwb_hollow_rectangular_problem,
                           mcwb_equilateral_tbeam_problem, mcwb_square_channel_problem, mcwb_tapered_channel_problem,
                           mcwb_ragsdell1976_problem)