desdeo 1.2-py3-none-any.whl → 2.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. desdeo/__init__.py +8 -8
  2. desdeo/adm/ADMAfsar.py +551 -0
  3. desdeo/adm/ADMChen.py +414 -0
  4. desdeo/adm/BaseADM.py +119 -0
  5. desdeo/adm/__init__.py +11 -0
  6. desdeo/api/README.md +73 -0
  7. desdeo/api/__init__.py +15 -0
  8. desdeo/api/app.py +50 -0
  9. desdeo/api/config.py +90 -0
  10. desdeo/api/config.toml +64 -0
  11. desdeo/api/db.py +27 -0
  12. desdeo/api/db_init.py +85 -0
  13. desdeo/api/db_models.py +164 -0
  14. desdeo/api/malaga_db_init.py +27 -0
  15. desdeo/api/models/__init__.py +266 -0
  16. desdeo/api/models/archive.py +23 -0
  17. desdeo/api/models/emo.py +128 -0
  18. desdeo/api/models/enautilus.py +69 -0
  19. desdeo/api/models/gdm/gdm_aggregate.py +139 -0
  20. desdeo/api/models/gdm/gdm_base.py +69 -0
  21. desdeo/api/models/gdm/gdm_score_bands.py +114 -0
  22. desdeo/api/models/gdm/gnimbus.py +138 -0
  23. desdeo/api/models/generic.py +104 -0
  24. desdeo/api/models/generic_states.py +401 -0
  25. desdeo/api/models/nimbus.py +158 -0
  26. desdeo/api/models/preference.py +128 -0
  27. desdeo/api/models/problem.py +717 -0
  28. desdeo/api/models/reference_point_method.py +18 -0
  29. desdeo/api/models/session.py +49 -0
  30. desdeo/api/models/state.py +463 -0
  31. desdeo/api/models/user.py +52 -0
  32. desdeo/api/models/utopia.py +25 -0
  33. desdeo/api/routers/_EMO.backup +309 -0
  34. desdeo/api/routers/_NAUTILUS.py +245 -0
  35. desdeo/api/routers/_NAUTILUS_navigator.py +233 -0
  36. desdeo/api/routers/_NIMBUS.py +765 -0
  37. desdeo/api/routers/__init__.py +5 -0
  38. desdeo/api/routers/emo.py +497 -0
  39. desdeo/api/routers/enautilus.py +237 -0
  40. desdeo/api/routers/gdm/gdm_aggregate.py +234 -0
  41. desdeo/api/routers/gdm/gdm_base.py +420 -0
  42. desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_manager.py +398 -0
  43. desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_routers.py +377 -0
  44. desdeo/api/routers/gdm/gnimbus/gnimbus_manager.py +698 -0
  45. desdeo/api/routers/gdm/gnimbus/gnimbus_routers.py +591 -0
  46. desdeo/api/routers/generic.py +233 -0
  47. desdeo/api/routers/nimbus.py +705 -0
  48. desdeo/api/routers/problem.py +307 -0
  49. desdeo/api/routers/reference_point_method.py +93 -0
  50. desdeo/api/routers/session.py +100 -0
  51. desdeo/api/routers/test.py +16 -0
  52. desdeo/api/routers/user_authentication.py +520 -0
  53. desdeo/api/routers/utils.py +187 -0
  54. desdeo/api/routers/utopia.py +230 -0
  55. desdeo/api/schema.py +100 -0
  56. desdeo/api/tests/__init__.py +0 -0
  57. desdeo/api/tests/conftest.py +151 -0
  58. desdeo/api/tests/test_enautilus.py +330 -0
  59. desdeo/api/tests/test_models.py +1179 -0
  60. desdeo/api/tests/test_routes.py +1075 -0
  61. desdeo/api/utils/_database.py +263 -0
  62. desdeo/api/utils/_logger.py +29 -0
  63. desdeo/api/utils/database.py +36 -0
  64. desdeo/api/utils/emo_database.py +40 -0
  65. desdeo/core.py +34 -0
  66. desdeo/emo/__init__.py +159 -0
  67. desdeo/emo/hooks/archivers.py +188 -0
  68. desdeo/emo/methods/EAs.py +541 -0
  69. desdeo/emo/methods/__init__.py +0 -0
  70. desdeo/emo/methods/bases.py +12 -0
  71. desdeo/emo/methods/templates.py +111 -0
  72. desdeo/emo/operators/__init__.py +1 -0
  73. desdeo/emo/operators/crossover.py +1282 -0
  74. desdeo/emo/operators/evaluator.py +114 -0
  75. desdeo/emo/operators/generator.py +459 -0
  76. desdeo/emo/operators/mutation.py +1224 -0
  77. desdeo/emo/operators/scalar_selection.py +202 -0
  78. desdeo/emo/operators/selection.py +1778 -0
  79. desdeo/emo/operators/termination.py +286 -0
  80. desdeo/emo/options/__init__.py +108 -0
  81. desdeo/emo/options/algorithms.py +435 -0
  82. desdeo/emo/options/crossover.py +164 -0
  83. desdeo/emo/options/generator.py +131 -0
  84. desdeo/emo/options/mutation.py +260 -0
  85. desdeo/emo/options/repair.py +61 -0
  86. desdeo/emo/options/scalar_selection.py +66 -0
  87. desdeo/emo/options/selection.py +127 -0
  88. desdeo/emo/options/templates.py +383 -0
  89. desdeo/emo/options/termination.py +143 -0
  90. desdeo/explanations/__init__.py +6 -0
  91. desdeo/explanations/explainer.py +100 -0
  92. desdeo/explanations/utils.py +90 -0
  93. desdeo/gdm/__init__.py +22 -0
  94. desdeo/gdm/gdmtools.py +45 -0
  95. desdeo/gdm/score_bands.py +114 -0
  96. desdeo/gdm/voting_rules.py +50 -0
  97. desdeo/mcdm/__init__.py +41 -0
  98. desdeo/mcdm/enautilus.py +338 -0
  99. desdeo/mcdm/gnimbus.py +484 -0
  100. desdeo/mcdm/nautili.py +345 -0
  101. desdeo/mcdm/nautilus.py +477 -0
  102. desdeo/mcdm/nautilus_navigator.py +656 -0
  103. desdeo/mcdm/nimbus.py +417 -0
  104. desdeo/mcdm/pareto_navigator.py +269 -0
  105. desdeo/mcdm/reference_point_method.py +186 -0
  106. desdeo/problem/__init__.py +83 -0
  107. desdeo/problem/evaluator.py +561 -0
  108. desdeo/problem/external/__init__.py +18 -0
  109. desdeo/problem/external/core.py +356 -0
  110. desdeo/problem/external/pymoo_provider.py +266 -0
  111. desdeo/problem/external/runtime.py +44 -0
  112. desdeo/problem/gurobipy_evaluator.py +562 -0
  113. desdeo/problem/infix_parser.py +341 -0
  114. desdeo/problem/json_parser.py +944 -0
  115. desdeo/problem/pyomo_evaluator.py +487 -0
  116. desdeo/problem/schema.py +1829 -0
  117. desdeo/problem/simulator_evaluator.py +348 -0
  118. desdeo/problem/sympy_evaluator.py +244 -0
  119. desdeo/problem/testproblems/__init__.py +88 -0
  120. desdeo/problem/testproblems/benchmarks_server.py +120 -0
  121. desdeo/problem/testproblems/binh_and_korn_problem.py +88 -0
  122. desdeo/problem/testproblems/cake_problem.py +185 -0
  123. desdeo/problem/testproblems/dmitry_forest_problem_discrete.py +71 -0
  124. desdeo/problem/testproblems/dtlz2_problem.py +102 -0
  125. desdeo/problem/testproblems/forest_problem.py +283 -0
  126. desdeo/problem/testproblems/knapsack_problem.py +163 -0
  127. desdeo/problem/testproblems/mcwb_problem.py +831 -0
  128. desdeo/problem/testproblems/mixed_variable_dimenrions_problem.py +83 -0
  129. desdeo/problem/testproblems/momip_problem.py +172 -0
  130. desdeo/problem/testproblems/multi_valued_constraints.py +119 -0
  131. desdeo/problem/testproblems/nimbus_problem.py +143 -0
  132. desdeo/problem/testproblems/pareto_navigator_problem.py +89 -0
  133. desdeo/problem/testproblems/re_problem.py +492 -0
  134. desdeo/problem/testproblems/river_pollution_problems.py +440 -0
  135. desdeo/problem/testproblems/rocket_injector_design_problem.py +140 -0
  136. desdeo/problem/testproblems/simple_problem.py +351 -0
  137. desdeo/problem/testproblems/simulator_problem.py +92 -0
  138. desdeo/problem/testproblems/single_objective.py +289 -0
  139. desdeo/problem/testproblems/spanish_sustainability_problem.py +945 -0
  140. desdeo/problem/testproblems/zdt_problem.py +274 -0
  141. desdeo/problem/utils.py +245 -0
  142. desdeo/tools/GenerateReferencePoints.py +181 -0
  143. desdeo/tools/__init__.py +120 -0
  144. desdeo/tools/desc_gen.py +22 -0
  145. desdeo/tools/generics.py +165 -0
  146. desdeo/tools/group_scalarization.py +3090 -0
  147. desdeo/tools/gurobipy_solver_interfaces.py +258 -0
  148. desdeo/tools/indicators_binary.py +117 -0
  149. desdeo/tools/indicators_unary.py +362 -0
  150. desdeo/tools/interaction_schema.py +38 -0
  151. desdeo/tools/intersection.py +54 -0
  152. desdeo/tools/iterative_pareto_representer.py +99 -0
  153. desdeo/tools/message.py +265 -0
  154. desdeo/tools/ng_solver_interfaces.py +199 -0
  155. desdeo/tools/non_dominated_sorting.py +134 -0
  156. desdeo/tools/patterns.py +283 -0
  157. desdeo/tools/proximal_solver.py +99 -0
  158. desdeo/tools/pyomo_solver_interfaces.py +477 -0
  159. desdeo/tools/reference_vectors.py +229 -0
  160. desdeo/tools/scalarization.py +2065 -0
  161. desdeo/tools/scipy_solver_interfaces.py +454 -0
  162. desdeo/tools/score_bands.py +627 -0
  163. desdeo/tools/utils.py +388 -0
  164. desdeo/tools/visualizations.py +67 -0
  165. desdeo/utopia_stuff/__init__.py +0 -0
  166. desdeo/utopia_stuff/data/1.json +15 -0
  167. desdeo/utopia_stuff/data/2.json +13 -0
  168. desdeo/utopia_stuff/data/3.json +15 -0
  169. desdeo/utopia_stuff/data/4.json +17 -0
  170. desdeo/utopia_stuff/data/5.json +15 -0
  171. desdeo/utopia_stuff/from_json.py +40 -0
  172. desdeo/utopia_stuff/reinit_user.py +38 -0
  173. desdeo/utopia_stuff/utopia_db_init.py +212 -0
  174. desdeo/utopia_stuff/utopia_problem.py +403 -0
  175. desdeo/utopia_stuff/utopia_problem_old.py +415 -0
  176. desdeo/utopia_stuff/utopia_reference_solutions.py +79 -0
  177. desdeo-2.1.0.dist-info/METADATA +186 -0
  178. desdeo-2.1.0.dist-info/RECORD +180 -0
  179. {desdeo-1.2.dist-info → desdeo-2.1.0.dist-info}/WHEEL +1 -1
  180. desdeo-2.1.0.dist-info/licenses/LICENSE +21 -0
  181. desdeo-1.2.dist-info/METADATA +0 -16
  182. desdeo-1.2.dist-info/RECORD +0 -4
@@ -0,0 +1,348 @@
+ """Evaluators are defined to evaluate simulator based and surrogate based objectives, constraints and extras."""
+
+ import json
+ import subprocess
+ import sys
+ from inspect import getfullargspec
+ from pathlib import Path
+ from urllib.parse import urlparse
+
+ import joblib
+ import numpy as np
+ import polars as pl
+ import requests
+
+ from desdeo.problem import (
+     MathParser,
+     ObjectiveTypeEnum,
+     PolarsEvaluator,
+     PolarsEvaluatorModesEnum,
+     Problem,
+ )
+ from desdeo.problem.external import ProviderParams, get_resolver, supported_schemes
+
+ # external resolver to resolve providers for problems defined externally of DESDEO
+ _external_resolver = get_resolver()
+
+
+ class EvaluatorError(Exception):
+     """Error raised when exceptions are encountered in an Evaluator."""
+
+
+ class SimulatorEvaluator:
+     """A class for creating evaluators for simulator based and surrogate based objectives, constraints and extras."""
+
+     def __init__(  # noqa: PLR0912
+         self,
+         problem: Problem,
+         params: dict[str, dict] | ProviderParams | None = None,
+         surrogate_paths: dict[str, Path] | None = None,
+     ):
+         """Creating an evaluator for simulator based and surrogate based objectives, constraints and extras.
+
+         Args:
+             problem (Problem): The problem as a pydantic 'Problem' data class.
+             params (dict[str, dict], optional): Parameters for the different simulators used in the problem.
+                 Given as a dict with the simulators' symbols as keys and the corresponding simulator parameters
+                 as a dict as values. Defaults to None.
+             surrogate_paths (dict[str, Path], optional): A dictionary where the keys are the names of the objectives,
+                 constraints and extra functions and the values are the paths to the surrogate models saved on disk.
+                 The names of the objectives, constraints and extra functions should match the names of the objectives,
+                 constraints and extra functions in the problem JSON. Defaults to None.
+         """
+         self.problem = problem
+         # store the symbol and min or max multiplier as well (symbol, min/max multiplier [1 | -1])
+         self.objective_mix_max_mult = [
+             (objective.symbol, -1 if objective.maximize else 1) for objective in problem.objectives
+         ]
+         # Gather the symbols of objectives of different types into their own lists
+         self.analytical_symbols = [
+             obj.symbol
+             for obj in list(filter(lambda x: x.objective_type == ObjectiveTypeEnum.analytical, problem.objectives))
+         ]
+         self.data_based_symbols = [
+             obj.symbol for obj in problem.objectives if obj.objective_type == ObjectiveTypeEnum.data_based
+         ]
+         self.simulator_symbols = [
+             obj.symbol
+             for obj in list(filter(lambda x: x.objective_type == ObjectiveTypeEnum.simulator, problem.objectives))
+         ]
+         self.surrogate_symbols = [
+             obj.symbol
+             for obj in list(filter(lambda x: x.objective_type == ObjectiveTypeEnum.surrogate, problem.objectives))
+         ]
+         if problem.scalarization_funcs is not None:
+             parser = MathParser()
+             self.scalarization_funcs = [
+                 (func.symbol, parser.parse(func.func))
+                 for func in problem.scalarization_funcs
+                 if func.symbol is not None
+             ]
+         else:
+             self.scalarization_funcs = []
+         # Gather any constraints' symbols
+         if problem.constraints is not None:
+             self.analytical_symbols = self.analytical_symbols + [
+                 con.symbol for con in list(filter(lambda x: x.func is not None, problem.constraints))
+             ]
+             self.simulator_symbols = self.simulator_symbols + [
+                 con.symbol for con in list(filter(lambda x: x.simulator_path is not None, problem.constraints))
+             ]
+             self.surrogate_symbols = self.surrogate_symbols + [
+                 con.symbol for con in list(filter(lambda x: x.surrogates is not None, problem.constraints))
+             ]
+
+         # Gather any extra functions' symbols
+         if problem.extra_funcs is not None:
+             self.analytical_symbols = self.analytical_symbols + [
+                 extra.symbol for extra in list(filter(lambda x: x.func is not None, problem.extra_funcs))
+             ]
+             self.simulator_symbols = self.simulator_symbols + [
+                 extra.symbol for extra in list(filter(lambda x: x.simulator_path is not None, problem.extra_funcs))
+             ]
+             self.surrogate_symbols = self.surrogate_symbols + [
+                 extra.symbol for extra in list(filter(lambda x: x.surrogates is not None, problem.extra_funcs))
+             ]
+
+         # Gather all the symbols of objectives, constraints and extra functions
+         self.problem_symbols = (
+             self.analytical_symbols + self.data_based_symbols + self.simulator_symbols + self.surrogate_symbols
+         )
+
+         # Gather the possible simulators
+         self.simulators = problem.simulators if problem.simulators is not None else []
+
+         # Gather the possibly given parameters
+         if params and not isinstance(params, dict):
+             params = params.model_dump()
+         self.params = {}
+         for sim in self.simulators:
+             sim_params = params.get(sim.name, {}) if params is not None else {}
+             if sim.parameter_options is not None:
+                 for key in sim.parameter_options:
+                     sim_params[key] = sim.parameter_options[key]
+             self.params[sim.name] = sim_params
+
+         self.surrogates = {}
+         if surrogate_paths is not None:
+             self._load_surrogates(surrogate_paths)
+         else:
+             self._load_surrogates()
+
+         if len(self.surrogate_symbols) > 0:
+             missing_surrogates = []
+             for symbol in self.surrogate_symbols:
+                 if symbol not in self.surrogates:
+                     missing_surrogates.append(symbol)
+
+             if len(missing_surrogates) > 0:
+                 raise EvaluatorError(f"Some surrogates missing: {missing_surrogates}.")
+
+     def _evaluate_simulator(self, xs: dict[str, list[int | float]]) -> pl.DataFrame:
+         """Evaluate the problem for the given decision variables using the problem's simulators.
+
+         Args:
+             xs (dict[str, list[int | float]]): The decision variables for which the functions are to be evaluated.
+                 Given as a dictionary with the decision variable symbols as keys and a list of decision variable values
+                 as the values. The length of the lists is the number of samples and each list should have the same
+                 length (same number of samples).
+
+         Returns:
+             pl.DataFrame: The objective, constraint and extra function values for the given decision variables as
+                 a polars dataframe. The symbols of the objectives, constraints and extra functions are the column names
+                 and the length of the columns is the number of samples. Will return those objective, constraint and
+                 extra function values that are gained from simulators listed in the problem object.
+         """
+         res_df = pl.DataFrame()
+         for sim in self.simulators:
+             # gather the possible parameters for the simulator
+             params = self.params.get(sim.name, {})
+             if sim.file is not None:
+                 # call the simulator with the decision variable values and parameters as dicts
+                 res = subprocess.run(
+                     [sys.executable, sim.file, "-d", str(xs), "-p", str(params)], capture_output=True, text=True
+                 )
+                 if res.returncode == 0:
+                     # gather the simulation results (a dict) into the results dataframe
+                     res_df = res_df.hstack(pl.DataFrame(json.loads(res.stdout)))
+                 else:
+                     raise EvaluatorError(res.stderr)
+             elif sim.url is not None:
+                 # call the endpoint
+                 try:
+                     if isinstance(xs, pl.DataFrame):
+                         # if xs is a polars dataframe, convert it to a dict
+                         xs = xs.to_dict(as_series=False)
+                     scheme = urlparse(sim.url.url).scheme
+                     if scheme in supported_schemes:
+                         # desdeo
+                         res = _external_resolver.evaluate(sim.url.url, params, xs)
+                         res_df = res_df.hstack(pl.DataFrame(res))
+                         # parse res
+                     else:
+                         # http, https, etc...
+                         res = requests.get(sim.url.url, auth=sim.url.auth, json={"d": xs, "p": params})
+                         res.raise_for_status()  # raise an error if the request failed
+                         res_df = res_df.hstack(pl.DataFrame(res.json()))
+                 except requests.RequestException as e:
+                     raise EvaluatorError(
+                         f"Failed to call the simulator at {sim.url}. Is the simulator server running?"
+                     ) from e
+
+         # Evaluate the minimization form of the objective functions
+         min_obj_columns = pl.DataFrame()
+         for symbol, min_max_mult in self.objective_mix_max_mult:
+             if symbol in res_df.columns:
+                 min_obj_columns = min_obj_columns.hstack(
+                     res_df.select((min_max_mult * pl.col(f"{symbol}")).alias(f"{symbol}_min"))
+                 )
+
+         res_df = res_df.hstack(min_obj_columns)
+         # If there are scalarization functions, evaluate them as well
+         scalarization_columns = res_df.select(*[expr.alias(symbol) for symbol, expr in self.scalarization_funcs])
+         return res_df.hstack(scalarization_columns)
+
+     def _evaluate_surrogates(self, xs: dict[str, list[int | float]]) -> pl.DataFrame:
+         """Evaluate the problem for the given decision variables using the surrogate models.
+
+         Args:
+             xs (dict[str, list[int | float]]): The decision variables for which the functions are to be evaluated.
+                 Given as a dictionary with the decision variable symbols as keys and a list of decision variable values
+                 as the values. The length of the lists is the number of samples and each list should have the same
+                 length (same number of samples).
+
+         Returns:
+             pl.DataFrame: The values of the evaluated objectives, constraints and extra functions as a polars
+                 dataframe. The uncertainty prediction values are also returned. If a model does not provide
+                 uncertainty predictions, then they are set as NaN.
+         """
+         res = pl.DataFrame()
+         var = np.array([value for _, value in xs.items()]).T  # has to be transposed (at least for sklearn models)
+         for symbol in self.surrogates:
+             # get a list of args accepted by the model's predict function
+             accepted_args = getfullargspec(self.surrogates[symbol].predict).args
+             # if "return_std" accepted, gather the uncertainty predictions as well
+             if "return_std" in accepted_args:
+                 value, uncertainty = self.surrogates[symbol].predict(var, return_std=True)
+             # otherwise, set the uncertainties as NaN
+             else:
+                 value = self.surrogates[symbol].predict(var)
+                 uncertainty = np.full(np.shape(value), np.nan)
+             # add the objectives, constraints and extra functions into the polars dataframe
+             # values go into columns with the symbol as the column names
+             res = res.with_columns(pl.Series(value).alias(symbol))
+             # uncertainties go into columns with {symbol}_uncert as the column names
+             res = res.with_columns(pl.Series(uncertainty).alias(f"{symbol}_uncert"))
+
+         # Evaluate the minimization form of the objective functions
+         min_obj_columns = pl.DataFrame()
+         for symbol, min_max_mult in self.objective_mix_max_mult:
+             if symbol in res.columns:
+                 min_obj_columns = min_obj_columns.hstack(
+                     res.select((min_max_mult * pl.col(f"{symbol}")).alias(f"{symbol}_min"))
+                 )
+         res_df = res.hstack(min_obj_columns)
+         # If there are scalarization functions, evaluate them as well
+         scalarization_columns = res_df.select(*[expr.alias(symbol) for symbol, expr in self.scalarization_funcs])
+         return res_df.hstack(scalarization_columns)
+
+     def _load_surrogates(self, surrogate_paths: dict[str, Path] | None = None):
+         """Load the surrogate models from disk and store them within the evaluator.
+
+         This is used during initialization of the evaluator or when the analyst wants to replace the current surrogate
+         models with other models. However, if a new model is trained after initialization of the evaluator, the problem
+         JSON should be updated with the new model paths and the evaluator should be re-initialized. This can happen
+         with any solver that does model management.
+
+         Args:
+             surrogate_paths (dict[str, Path]): A dictionary where the keys are the names of the objectives, constraints
+                 and extra functions and the values are the paths to the surrogate models saved on disk. The names of
+                 the objectives should match the names of the objectives in the problem JSON. At the moment the supported
+                 file format is .skops (through skops.io). TODO: if skops.io is used, it should be added to pyproject.toml.
+         """
+         if surrogate_paths is not None:
+             for symbol in surrogate_paths:
+                 with Path.open(f"{surrogate_paths[symbol]}", "rb") as file:
+                     self.surrogates[symbol] = joblib.load(file)
+                 """unknown_types = sio.get_untrusted_types(file=file)
+                 if len(unknown_types) == 0:
+                     self.surrogates[symbol] = sio.load(file, unknown_types)
+                 else: # TODO: if there are unknown types they should be checked
+                     self.surrogates[symbol] = sio.load(file, unknown_types)
+                     #raise EvaluatorError(f"Untrusted types found in the model of {obj.symbol}: {unknown_types}")"""
+         else:
+             # check each surrogate based objective, constraint and extra function for a surrogate path
+             for obj in self.problem.objectives:
+                 if obj.surrogates is not None:
+                     with Path.open(f"{obj.surrogates[0]}", "rb") as file:
+                         self.surrogates[obj.symbol] = joblib.load(file)
+                     """unknown_types = sio.get_untrusted_types(file=file)
+                     if len(unknown_types) == 0:
+                         self.surrogates[obj.symbol] = sio.load(file, unknown_types)
+                     else: # TODO: if there are unknown types they should be checked
+                         self.surrogates[obj.symbol] = sio.load(file, unknown_types)
+                         #raise EvaluatorError(f"Untrusted types found in the model of {obj.symbol}: {unknown_types}")"""
+             for con in self.problem.constraints or []:  # if there are no constraints, an empty list is used
+                 if con.surrogates is not None:
+                     with Path.open(f"{con.surrogates[0]}", "rb") as file:
+                         self.surrogates[con.symbol] = joblib.load(file)
+                     """unknown_types = sio.get_untrusted_types(file=file)
+                     if len(unknown_types) == 0:
+                         self.surrogates[con.symbol] = sio.load(file, unknown_types)
+                     else: # TODO: if there are unknown types they should be checked
+                         self.surrogates[con.symbol] = sio.load(file, unknown_types)
+                         #raise EvaluatorError(f"Untrusted types found in the model of {obj.symbol}: {unknown_types}")"""
+             for extra in self.problem.extra_funcs or []:  # if there are no extra functions, an empty list is used
+                 if extra.surrogates is not None:
+                     with Path.open(f"{extra.surrogates[0]}", "rb") as file:
+                         self.surrogates[extra.symbol] = joblib.load(file)
+                     """unknown_types = sio.get_untrusted_types(file=file)
+                     if len(unknown_types) == 0:
+                         self.surrogates[extra.symbol] = sio.load(file, unknown_types)
+                     else: # TODO: if there are unknown types they should be checked
+                         self.surrogates[extra.symbol] = sio.load(file, unknown_types)
+                         #raise EvaluatorError(f"Untrusted types found in the model of {obj.symbol}: {unknown_types}")"""
+
+     def evaluate(self, xs: dict[str, list[int | float]], flat: bool = False) -> pl.DataFrame:
+         """Evaluate the functions for the given decision variables.
+
+         Evaluates analytical, simulation based and surrogate based functions. For now, the evaluator assumes that there
+         are no data based objectives.
+
+         Args:
+             xs (dict[str, list[int | float]]): The decision variables for which the functions are to be evaluated.
+                 Given as a dictionary with the decision variable symbols as keys and a list of decision variable values
+                 as the values. The length of the lists is the number of samples and each list should have the same
+                 length (same number of samples).
+             flat (bool, optional): whether the evaluation is done using flattened variables or not. Defaults to False.
+
+         Returns:
+             pl.DataFrame: polars dataframe with the evaluated function values.
+         """
+         # TODO (@gialmisi): Make work with polars dataframes as well in addition to dict.
+         # See, e.g., PolarsEvaluator._polars_evaluate. Then, remove the arg `flat`.
+         res = pl.DataFrame()
+
+         # Evaluate the analytical functions
+         if len(self.analytical_symbols + self.data_based_symbols) > 0:
+             polars_evaluator = PolarsEvaluator(self.problem, evaluator_mode=PolarsEvaluatorModesEnum.mixed)
+             analytical_values = (
+                 polars_evaluator._polars_evaluate(xs) if not flat else polars_evaluator._polars_evaluate_flat(xs)
+             )
+             res = res.hstack(analytical_values)
+
+         # Evaluate the simulator based functions
+         if len(self.simulator_symbols) > 0:
+             simulator_values = self._evaluate_simulator(xs)
+             res = res.hstack(simulator_values)
+
+         # Evaluate the surrogate based functions
+         if len(self.surrogate_symbols) > 0:
+             surrogate_values = self._evaluate_surrogates(xs)
+             res = res.hstack(surrogate_values)
+
+         # Check that everything is evaluated
+         for symbol in self.problem_symbols:
+             if symbol not in res.columns:
+                 raise EvaluatorError(f"{symbol} not evaluated.")
+         return res
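
For reference, `_evaluate_simulator` above runs each file-based simulator as a subprocess: the decision variables and the simulator parameters are passed as Python-literal dicts through the `-d` and `-p` flags, and whatever the script prints to stdout is parsed as JSON and stacked into the results dataframe. Below is a minimal sketch of a script that follows this contract; the symbols `x_1`, `x_2`, `f_1` and the `offset` parameter are hypothetical and not part of the package.

    """A sketch of a simulator script compatible with SimulatorEvaluator's subprocess call."""

    import argparse
    import ast
    import json


    def main() -> None:
        parser = argparse.ArgumentParser()
        parser.add_argument("-d", dest="variables", required=True)  # decision variables as str(dict)
        parser.add_argument("-p", dest="parameters", default="{}")  # simulator parameters as str(dict)
        args = parser.parse_args()

        # the evaluator passes str(dict), i.e. Python literals rather than JSON
        xs = ast.literal_eval(args.variables)
        params = ast.literal_eval(args.parameters)
        offset = params.get("offset", 0.0)

        # one output value per sample; all variable lists have the same length
        f_1 = [x1 + x2 + offset for x1, x2 in zip(xs["x_1"], xs["x_2"])]

        # results must be printed to stdout as a JSON object keyed by symbol
        print(json.dumps({"f_1": f_1}))


    if __name__ == "__main__":
        main()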
@@ -0,0 +1,244 @@
+ """Implements an evaluator based on sympy expressions."""
+
+ from copy import deepcopy
+
+ import sympy as sp
+
+ from desdeo.problem.evaluator import variable_dimension_enumerate
+ from desdeo.problem.json_parser import FormatEnum, MathParser
+ from desdeo.problem.schema import Problem
+
+ SUPPORTED_VAR_DIMENSIONS = ["scalar"]
+
+
+ class SympyEvaluatorError(Exception):
+     """Raised when an exception with a Sympy evaluator is encountered."""
+
+
+ class SympyEvaluator:
+     """Defines an evaluator that can be used to evaluate instances of Problem utilizing sympy."""
+
+     def __init__(self, problem: Problem):
+         """Initializes the evaluator.
+
+         Args:
+             problem (Problem): the problem to be evaluated.
+         """
+         if variable_dimension_enumerate(problem) not in SUPPORTED_VAR_DIMENSIONS:
+             msg = "SymPy evaluator does not yet support tensors."
+             raise SympyEvaluatorError(msg)
+
+         # Collect all the symbols and expressions in the problem
+         parser = MathParser(to_format=FormatEnum.sympy)
+
+         self.variable_symbols = [var.symbol for var in problem.variables]
+         self.constant_expressions = (
+             {const.symbol: parser.parse(const.value) for const in problem.constants}
+             if problem.constants is not None
+             else None
+         )
+
+         self.extra_expressions = (
+             {extra.symbol: parser.parse(extra.func) for extra in problem.extra_funcs}
+             if problem.extra_funcs is not None
+             else None
+         )
+
+         self.objective_expressions = {obj.symbol: parser.parse(obj.func) for obj in problem.objectives}
+
+         self.constraint_expressions = (
+             {con.symbol: parser.parse(con.func) for con in problem.constraints}
+             if problem.constraints is not None
+             else None
+         )
+
+         self.scalarization_expressions = (
+             {scal.symbol: parser.parse(scal.func) for scal in problem.scalarization_funcs}
+             if problem.scalarization_funcs is not None
+             else None
+         )
+
+         # replace symbols and create lambda functions ready to be called
+         # replace constants in extra functions, if they exist
+         if self.extra_expressions is not None:
+             _extra_expressions = (
+                 {
+                     k: self.extra_expressions[k].subs(self.constant_expressions, evaluate=False)
+                     for k in self.extra_expressions
+                 }
+                 if self.constant_expressions is not None
+                 else deepcopy(self.extra_expressions)
+             )
+         else:
+             _extra_expressions = None
+
+         # replace constants in objective functions, if constants have been defined
+         _objective_expressions = (
+             {
+                 k: self.objective_expressions[k].subs(self.constant_expressions, evaluate=False)
+                 for k in self.objective_expressions
+             }
+             if self.constant_expressions is not None
+             else deepcopy(self.objective_expressions)
+         )
+
+         # replace extra functions in objective functions, if extra functions have been defined
+         _objective_expressions = (
+             (
+                 {
+                     k: _objective_expressions[k].subs(self.extra_expressions, evaluate=False)
+                     for k in _objective_expressions
+                 }
+             )
+             if self.extra_expressions is not None
+             else _objective_expressions
+         )
+
+         # always minimized objective expressions
+         _objective_expressions_min = {
+             f"{obj.symbol}_min": -_objective_expressions[obj.symbol]
+             if obj.maximize
+             else _objective_expressions[obj.symbol]
+             for obj in problem.objectives
+         }
+
+         # replace stuff in the constraint expressions if any are defined
+         if self.constraint_expressions is not None:
+             # replace constants
+             _constraint_expressions = (
+                 {
+                     k: self.constraint_expressions[k].subs(self.constant_expressions, evaluate=False)
+                     for k in self.constraint_expressions
+                 }
+                 if self.constant_expressions is not None
+                 else deepcopy(self.constraint_expressions)
+             )
+
+             # replace extra functions
+             _constraint_expressions = (
+                 {
+                     k: _constraint_expressions[k].subs(_extra_expressions, evaluate=False)
+                     for k in _constraint_expressions
+                 }
+                 if _extra_expressions is not None
+                 else _constraint_expressions
+             )
+
+             # replace objective functions
+             _constraint_expressions = {
+                 k: _constraint_expressions[k].subs(_objective_expressions, evaluate=False)
+                 for k in _constraint_expressions
+             }
+             _constraint_expressions = {
+                 k: _constraint_expressions[k].subs(_objective_expressions_min, evaluate=False)
+                 for k in _constraint_expressions
+             }
+
+         else:
+             _constraint_expressions = None
+
+         # replace stuff in scalarization expressions if any are defined
+         if self.scalarization_expressions is not None:
+             # replace constants
+             _scalarization_expressions = (
+                 {
+                     k: self.scalarization_expressions[k].subs(self.constant_expressions, evaluate=False)
+                     for k in self.scalarization_expressions
+                 }
+                 if self.constant_expressions is not None
+                 else deepcopy(self.scalarization_expressions)
+             )
+
+             # replace extra functions
+             _scalarization_expressions = (
+                 {
+                     k: _scalarization_expressions[k].subs(_extra_expressions, evaluate=False)
+                     for k in _scalarization_expressions
+                 }
+                 if _extra_expressions is not None
+                 else _scalarization_expressions
+             )
+
+             # replace constraints
+             _scalarization_expressions = (
+                 {
+                     k: _scalarization_expressions[k].subs(_constraint_expressions, evaluate=False)
+                     for k in _scalarization_expressions
+                 }
+                 if _constraint_expressions is not None
+                 else _scalarization_expressions
+             )
+
+             # replace objectives
+             _scalarization_expressions = {
+                 k: _scalarization_expressions[k].subs(_objective_expressions, evaluate=False)
+                 for k in _scalarization_expressions
+             }
+
+             _scalarization_expressions = {
+                 k: _scalarization_expressions[k].subs(_objective_expressions_min, evaluate=False)
+                 for k in _scalarization_expressions
+             }
+
+         else:
+             _scalarization_expressions = None
+
+         # initialize callable lambdas
+         self.lambda_exprs = {
+             _k: _v
+             for _d in [
+                 {k: sp.lambdify(self.variable_symbols, d[k]) for k in d}
+                 for d in [
+                     _extra_expressions,
+                     _objective_expressions,
+                     _objective_expressions_min,
+                     _constraint_expressions,
+                     _scalarization_expressions,
+                 ]
+                 if d is not None
+             ]
+             for _k, _v in _d.items()
+         }
+
+         self.problem = problem
+         self.parser = parser
+
+     def evaluate(self, xs: dict[str, float | int | bool]) -> dict[str, float | int | bool]:
+         """Evaluate the whole problem with a given decision variable dict.
+
+         Args:
+             xs (dict[str, float | int | bool]): a dict with keys representing decision variable
+                 symbols and values with the decision variable value.
+
+         Returns:
+             dict[str, float | int | bool]: a dict with keys corresponding to each symbol
+                 defined for the problem being evaluated and the corresponding expression's
+                 value.
+         """
+         return {k: self.lambda_exprs[k](**xs) for k in self.lambda_exprs} | xs
+
+     def evaluate_target(self, xs: dict[str, float | int | bool], target: str) -> float:
+         """Evaluates only the specified target with given decision variables.
+
+         Args:
+             xs (dict[str, float | int | bool]): a dict with keys representing decision variable
+                 symbols and values with the decision variable value.
+             target (str): the symbol of the function expression to be evaluated.
+
+         Returns:
+             float: the value of the target once evaluated.
+         """
+         return self.lambda_exprs[target](**xs)
+
+     def evaluate_constraints(self, xs: dict[str, float | int | bool]) -> dict[str, float | int | bool]:
+         """Evaluates the constraints of the problem with given decision variables.
+
+         Args:
+             xs (dict[str, float | int | bool]): a dict with keys representing decision variable
+                 symbols and values with the decision variable value.
+
+         Returns:
+             dict[str, float | int | bool]: a dict with keys being the constraints' symbols
+                 and values being the value of the corresponding constraint.
+         """
+         return {k: self.lambda_exprs[k](**xs) for k in [constr.symbol for constr in self.problem.constraints]}
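
A short usage sketch of SympyEvaluator follows. It assumes `simple_test_problem` from `desdeo.problem.testproblems` is an analytical problem with scalar variables (the only variable dimension the evaluator supports) and that the factory takes no required arguments; the sample value 1.0 is arbitrary.

    from desdeo.problem.sympy_evaluator import SympyEvaluator
    from desdeo.problem.testproblems import simple_test_problem

    problem = simple_test_problem()  # assumption: scalar-variable analytical test problem
    evaluator = SympyEvaluator(problem)

    # one scalar value per decision variable symbol
    xs = {var.symbol: 1.0 for var in problem.variables}

    values = evaluator.evaluate(xs)  # every problem symbol -> value, merged with xs
    first_objective = evaluator.evaluate_target(xs, problem.objectives[0].symbol)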
@@ -0,0 +1,88 @@
+ """Pre-defined multiobjective optimization problems.
+
+ Pre-defined problems for, e.g.,
+ testing and illustration purposes are defined here.
+ """
+
+ __all__ = [  # noqa: RUF022
+     "binh_and_korn",
+     "dtlz2",
+     "forest_problem",
+     "forest_problem_discrete",
+     "mcwb_equilateral_tbeam_problem",
+     "mcwb_hollow_rectangular_problem",
+     "mcwb_ragsdell1976_problem",
+     "mcwb_solid_rectangular_problem",
+     "mcwb_square_channel_problem",
+     "mcwb_tapered_channel_problem",
+     "mixed_variable_dimensions_problem",
+     "momip_ti2",
+     "momip_ti7",
+     "multi_valued_constraint_problem",
+     "nimbus_test_problem",
+     "pareto_navigator_test_problem",
+     "re21",
+     "re22",
+     "re23",
+     "re24",
+     "river_pollution_problem",
+     "river_pollution_problem_discrete",
+     "river_pollution_scenario",
+     "rocket_injector_design",
+     "simple_data_problem",
+     "simple_integer_test_problem",
+     "simple_knapsack",
+     "simple_knapsack_vectors",
+     "simple_linear_test_problem",
+     "simple_scenario_test_problem",
+     "simple_test_problem",
+     "simulator_problem",
+     "spanish_sustainability_problem",
+     "spanish_sustainability_problem_discrete",
+     "zdt1",
+     "zdt2",
+     "zdt3",
+     "best_cake_problem",
+     "dmitry_forest_problem_disc",
+ ]
+
+
+ from .binh_and_korn_problem import binh_and_korn
+ from .cake_problem import best_cake_problem
+ from .dmitry_forest_problem_discrete import dmitry_forest_problem_disc
+ from .dtlz2_problem import dtlz2
+ from .forest_problem import forest_problem, forest_problem_discrete
+ from .knapsack_problem import simple_knapsack, simple_knapsack_vectors
+ from .mcwb_problem import (
+     mcwb_equilateral_tbeam_problem,
+     mcwb_hollow_rectangular_problem,
+     mcwb_ragsdell1976_problem,
+     mcwb_solid_rectangular_problem,
+     mcwb_square_channel_problem,
+     mcwb_tapered_channel_problem,
+ )
+ from .mixed_variable_dimenrions_problem import mixed_variable_dimensions_problem
+ from .momip_problem import momip_ti2, momip_ti7
+ from .multi_valued_constraints import multi_valued_constraint_problem
+ from .nimbus_problem import nimbus_test_problem
+ from .pareto_navigator_problem import pareto_navigator_test_problem
+ from .re_problem import re21, re22, re23, re24
+ from .river_pollution_problems import (
+     river_pollution_problem,
+     river_pollution_problem_discrete,
+     river_pollution_scenario,
+ )
+ from .rocket_injector_design_problem import rocket_injector_design
+ from .simple_problem import (
+     simple_data_problem,
+     simple_integer_test_problem,
+     simple_linear_test_problem,
+     simple_scenario_test_problem,
+     simple_test_problem,
+ )
+ from .simulator_problem import simulator_problem
+ from .spanish_sustainability_problem import (
+     spanish_sustainability_problem,
+     spanish_sustainability_problem_discrete,
+ )
+ from .zdt_problem import zdt1, zdt2, zdt3
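
The entries above are factory functions that build ready-made Problem instances. A hedged sketch of how they are used (assuming `river_pollution_problem` can be called without arguments):

    from desdeo.problem.testproblems import river_pollution_problem

    problem = river_pollution_problem()  # assumed to need no arguments

    # Problem instances expose symbols for variables and objectives,
    # which the evaluators above key their results by
    print([var.symbol for var in problem.variables])
    print([obj.symbol for obj in problem.objectives])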