desdeo 1.2__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. desdeo/__init__.py +8 -8
  2. desdeo/adm/ADMAfsar.py +551 -0
  3. desdeo/adm/ADMChen.py +414 -0
  4. desdeo/adm/BaseADM.py +119 -0
  5. desdeo/adm/__init__.py +11 -0
  6. desdeo/api/README.md +73 -0
  7. desdeo/api/__init__.py +15 -0
  8. desdeo/api/app.py +50 -0
  9. desdeo/api/config.py +90 -0
  10. desdeo/api/config.toml +64 -0
  11. desdeo/api/db.py +27 -0
  12. desdeo/api/db_init.py +85 -0
  13. desdeo/api/db_models.py +164 -0
  14. desdeo/api/malaga_db_init.py +27 -0
  15. desdeo/api/models/__init__.py +266 -0
  16. desdeo/api/models/archive.py +23 -0
  17. desdeo/api/models/emo.py +128 -0
  18. desdeo/api/models/enautilus.py +69 -0
  19. desdeo/api/models/gdm/gdm_aggregate.py +139 -0
  20. desdeo/api/models/gdm/gdm_base.py +69 -0
  21. desdeo/api/models/gdm/gdm_score_bands.py +114 -0
  22. desdeo/api/models/gdm/gnimbus.py +138 -0
  23. desdeo/api/models/generic.py +104 -0
  24. desdeo/api/models/generic_states.py +401 -0
  25. desdeo/api/models/nimbus.py +158 -0
  26. desdeo/api/models/preference.py +128 -0
  27. desdeo/api/models/problem.py +717 -0
  28. desdeo/api/models/reference_point_method.py +18 -0
  29. desdeo/api/models/session.py +49 -0
  30. desdeo/api/models/state.py +463 -0
  31. desdeo/api/models/user.py +52 -0
  32. desdeo/api/models/utopia.py +25 -0
  33. desdeo/api/routers/_EMO.backup +309 -0
  34. desdeo/api/routers/_NAUTILUS.py +245 -0
  35. desdeo/api/routers/_NAUTILUS_navigator.py +233 -0
  36. desdeo/api/routers/_NIMBUS.py +765 -0
  37. desdeo/api/routers/__init__.py +5 -0
  38. desdeo/api/routers/emo.py +497 -0
  39. desdeo/api/routers/enautilus.py +237 -0
  40. desdeo/api/routers/gdm/gdm_aggregate.py +234 -0
  41. desdeo/api/routers/gdm/gdm_base.py +420 -0
  42. desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_manager.py +398 -0
  43. desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_routers.py +377 -0
  44. desdeo/api/routers/gdm/gnimbus/gnimbus_manager.py +698 -0
  45. desdeo/api/routers/gdm/gnimbus/gnimbus_routers.py +591 -0
  46. desdeo/api/routers/generic.py +233 -0
  47. desdeo/api/routers/nimbus.py +705 -0
  48. desdeo/api/routers/problem.py +307 -0
  49. desdeo/api/routers/reference_point_method.py +93 -0
  50. desdeo/api/routers/session.py +100 -0
  51. desdeo/api/routers/test.py +16 -0
  52. desdeo/api/routers/user_authentication.py +520 -0
  53. desdeo/api/routers/utils.py +187 -0
  54. desdeo/api/routers/utopia.py +230 -0
  55. desdeo/api/schema.py +100 -0
  56. desdeo/api/tests/__init__.py +0 -0
  57. desdeo/api/tests/conftest.py +151 -0
  58. desdeo/api/tests/test_enautilus.py +330 -0
  59. desdeo/api/tests/test_models.py +1179 -0
  60. desdeo/api/tests/test_routes.py +1075 -0
  61. desdeo/api/utils/_database.py +263 -0
  62. desdeo/api/utils/_logger.py +29 -0
  63. desdeo/api/utils/database.py +36 -0
  64. desdeo/api/utils/emo_database.py +40 -0
  65. desdeo/core.py +34 -0
  66. desdeo/emo/__init__.py +159 -0
  67. desdeo/emo/hooks/archivers.py +188 -0
  68. desdeo/emo/methods/EAs.py +541 -0
  69. desdeo/emo/methods/__init__.py +0 -0
  70. desdeo/emo/methods/bases.py +12 -0
  71. desdeo/emo/methods/templates.py +111 -0
  72. desdeo/emo/operators/__init__.py +1 -0
  73. desdeo/emo/operators/crossover.py +1282 -0
  74. desdeo/emo/operators/evaluator.py +114 -0
  75. desdeo/emo/operators/generator.py +459 -0
  76. desdeo/emo/operators/mutation.py +1224 -0
  77. desdeo/emo/operators/scalar_selection.py +202 -0
  78. desdeo/emo/operators/selection.py +1778 -0
  79. desdeo/emo/operators/termination.py +286 -0
  80. desdeo/emo/options/__init__.py +108 -0
  81. desdeo/emo/options/algorithms.py +435 -0
  82. desdeo/emo/options/crossover.py +164 -0
  83. desdeo/emo/options/generator.py +131 -0
  84. desdeo/emo/options/mutation.py +260 -0
  85. desdeo/emo/options/repair.py +61 -0
  86. desdeo/emo/options/scalar_selection.py +66 -0
  87. desdeo/emo/options/selection.py +127 -0
  88. desdeo/emo/options/templates.py +383 -0
  89. desdeo/emo/options/termination.py +143 -0
  90. desdeo/explanations/__init__.py +6 -0
  91. desdeo/explanations/explainer.py +100 -0
  92. desdeo/explanations/utils.py +90 -0
  93. desdeo/gdm/__init__.py +22 -0
  94. desdeo/gdm/gdmtools.py +45 -0
  95. desdeo/gdm/score_bands.py +114 -0
  96. desdeo/gdm/voting_rules.py +50 -0
  97. desdeo/mcdm/__init__.py +41 -0
  98. desdeo/mcdm/enautilus.py +338 -0
  99. desdeo/mcdm/gnimbus.py +484 -0
  100. desdeo/mcdm/nautili.py +345 -0
  101. desdeo/mcdm/nautilus.py +477 -0
  102. desdeo/mcdm/nautilus_navigator.py +656 -0
  103. desdeo/mcdm/nimbus.py +417 -0
  104. desdeo/mcdm/pareto_navigator.py +269 -0
  105. desdeo/mcdm/reference_point_method.py +186 -0
  106. desdeo/problem/__init__.py +83 -0
  107. desdeo/problem/evaluator.py +561 -0
  108. desdeo/problem/external/__init__.py +18 -0
  109. desdeo/problem/external/core.py +356 -0
  110. desdeo/problem/external/pymoo_provider.py +266 -0
  111. desdeo/problem/external/runtime.py +44 -0
  112. desdeo/problem/gurobipy_evaluator.py +562 -0
  113. desdeo/problem/infix_parser.py +341 -0
  114. desdeo/problem/json_parser.py +944 -0
  115. desdeo/problem/pyomo_evaluator.py +487 -0
  116. desdeo/problem/schema.py +1829 -0
  117. desdeo/problem/simulator_evaluator.py +348 -0
  118. desdeo/problem/sympy_evaluator.py +244 -0
  119. desdeo/problem/testproblems/__init__.py +88 -0
  120. desdeo/problem/testproblems/benchmarks_server.py +120 -0
  121. desdeo/problem/testproblems/binh_and_korn_problem.py +88 -0
  122. desdeo/problem/testproblems/cake_problem.py +185 -0
  123. desdeo/problem/testproblems/dmitry_forest_problem_discrete.py +71 -0
  124. desdeo/problem/testproblems/dtlz2_problem.py +102 -0
  125. desdeo/problem/testproblems/forest_problem.py +283 -0
  126. desdeo/problem/testproblems/knapsack_problem.py +163 -0
  127. desdeo/problem/testproblems/mcwb_problem.py +831 -0
  128. desdeo/problem/testproblems/mixed_variable_dimenrions_problem.py +83 -0
  129. desdeo/problem/testproblems/momip_problem.py +172 -0
  130. desdeo/problem/testproblems/multi_valued_constraints.py +119 -0
  131. desdeo/problem/testproblems/nimbus_problem.py +143 -0
  132. desdeo/problem/testproblems/pareto_navigator_problem.py +89 -0
  133. desdeo/problem/testproblems/re_problem.py +492 -0
  134. desdeo/problem/testproblems/river_pollution_problems.py +440 -0
  135. desdeo/problem/testproblems/rocket_injector_design_problem.py +140 -0
  136. desdeo/problem/testproblems/simple_problem.py +351 -0
  137. desdeo/problem/testproblems/simulator_problem.py +92 -0
  138. desdeo/problem/testproblems/single_objective.py +289 -0
  139. desdeo/problem/testproblems/spanish_sustainability_problem.py +945 -0
  140. desdeo/problem/testproblems/zdt_problem.py +274 -0
  141. desdeo/problem/utils.py +245 -0
  142. desdeo/tools/GenerateReferencePoints.py +181 -0
  143. desdeo/tools/__init__.py +120 -0
  144. desdeo/tools/desc_gen.py +22 -0
  145. desdeo/tools/generics.py +165 -0
  146. desdeo/tools/group_scalarization.py +3090 -0
  147. desdeo/tools/gurobipy_solver_interfaces.py +258 -0
  148. desdeo/tools/indicators_binary.py +117 -0
  149. desdeo/tools/indicators_unary.py +362 -0
  150. desdeo/tools/interaction_schema.py +38 -0
  151. desdeo/tools/intersection.py +54 -0
  152. desdeo/tools/iterative_pareto_representer.py +99 -0
  153. desdeo/tools/message.py +265 -0
  154. desdeo/tools/ng_solver_interfaces.py +199 -0
  155. desdeo/tools/non_dominated_sorting.py +134 -0
  156. desdeo/tools/patterns.py +283 -0
  157. desdeo/tools/proximal_solver.py +99 -0
  158. desdeo/tools/pyomo_solver_interfaces.py +477 -0
  159. desdeo/tools/reference_vectors.py +229 -0
  160. desdeo/tools/scalarization.py +2065 -0
  161. desdeo/tools/scipy_solver_interfaces.py +454 -0
  162. desdeo/tools/score_bands.py +627 -0
  163. desdeo/tools/utils.py +388 -0
  164. desdeo/tools/visualizations.py +67 -0
  165. desdeo/utopia_stuff/__init__.py +0 -0
  166. desdeo/utopia_stuff/data/1.json +15 -0
  167. desdeo/utopia_stuff/data/2.json +13 -0
  168. desdeo/utopia_stuff/data/3.json +15 -0
  169. desdeo/utopia_stuff/data/4.json +17 -0
  170. desdeo/utopia_stuff/data/5.json +15 -0
  171. desdeo/utopia_stuff/from_json.py +40 -0
  172. desdeo/utopia_stuff/reinit_user.py +38 -0
  173. desdeo/utopia_stuff/utopia_db_init.py +212 -0
  174. desdeo/utopia_stuff/utopia_problem.py +403 -0
  175. desdeo/utopia_stuff/utopia_problem_old.py +415 -0
  176. desdeo/utopia_stuff/utopia_reference_solutions.py +79 -0
  177. desdeo-2.1.0.dist-info/METADATA +186 -0
  178. desdeo-2.1.0.dist-info/RECORD +180 -0
  179. {desdeo-1.2.dist-info → desdeo-2.1.0.dist-info}/WHEEL +1 -1
  180. desdeo-2.1.0.dist-info/licenses/LICENSE +21 -0
  181. desdeo-1.2.dist-info/METADATA +0 -16
  182. desdeo-1.2.dist-info/RECORD +0 -4
@@ -0,0 +1,561 @@
1
+ """Defines a Polars-based evaluator."""
2
+
3
+ from enum import Enum
4
+
5
+ import numpy as np
6
+ import polars as pl
7
+
8
+ from desdeo.problem.json_parser import MathParser, replace_str
9
+ from desdeo.problem.schema import (
10
+ Constant,
11
+ ObjectiveTypeEnum,
12
+ Problem,
13
+ TensorConstant,
14
+ TensorVariable,
15
+ Variable,
16
+ )
17
+
18
+ SUPPORTED_EVALUATOR_MODES = ["variables", "discrete"]
19
+ SUPPORTED_VAR_DIMENSIONS = ["scalar", "vector"]
20
+
21
+
22
class PolarsEvaluatorModesEnum(str, Enum):
    """Defines the supported modes for the PolarsEvaluator."""

    variables = "variables"
    """Indicates that the evaluator should expect decision variables vectors and
    evaluate the problem with them."""
    discrete = "discrete"
    """Indicates that the problem is defined by discrete decision variable
    vector and objective vector pairs and those should be evaluated. In this
    mode, the evaluator does not expect any decision variables as arguments when
    evaluating."""
    mixed = "mixed"
    """Indicates that the problem has analytical and simulator and/or surrogate
    based objectives, constraints and extra functions. In this mode, the evaluator
    only handles data-based and analytical functions. For data-based objectives,
    it assumes that the variables are to be evaluated by finding the closest
    variable values in the data compared to the input, and evaluating the result
    to be the matching objective function values that match the closest
    variable values found. The evaluator should expect decision variables
    vectors and evaluate the problem with them."""
42
+
43
+
44
class PolarsEvaluatorError(Exception):
    """Error raised when exceptions are encountered in a PolarsEvaluator."""
46
+
47
+
48
class VariableDimensionEnum(str, Enum):
    """Enumerates the possible dimensionalities of a problem's decision variables."""

    scalar = "scalar"
    """Every variable of the problem is a scalar."""
    vector = "vector"
    """The highest-dimensional variable of the problem is a vector."""
    tensor = "tensor"
    """At least one variable of the problem has more than one dimension."""
57
+
58
+
59
def variable_dimension_enumerate(problem: Problem) -> VariableDimensionEnum:
    """Return a VariableDimensionEnum based on the problem's variables' dimensions.

    This is needed as different evaluators and solvers can handle different
    dimensional variables.

    If there are no TensorVariables in the problem, returns scalar.
    If there are, at the highest, one dimensional TensorVariables, returns vector.
    Else, there is at least one TensorVariable with a higher dimension, returns tensor.

    Args:
        problem (Problem): The problem being solved or evaluated.

    Returns:
        VariableDimensionEnum: The enumeration of the problem's variable dimensions.
    """
    result = VariableDimensionEnum.scalar
    for variable in problem.variables:
        if not isinstance(variable, TensorVariable):
            continue
        ndim = len(variable.shape)
        # A shape like (n,), (1, n) or (n, 1) still counts as a vector; anything
        # with two or more non-trivial dimensions is a tensor.
        is_vector_like = ndim == 1 or (
            ndim == 2 and not (variable.shape[0] > 1 and variable.shape[1] > 1)  # noqa: PLR2004
        )
        if not is_vector_like:
            # One genuinely multi-dimensional variable decides the outcome.
            return VariableDimensionEnum.tensor
        result = VariableDimensionEnum.vector
    return result
82
+
83
+
84
class PolarsEvaluator:
    """A class for creating Polars-based evaluators for multiobjective optimization problems.

    The evaluator is to be used with different optimizers. PolarsEvaluator is specifically
    for solvers that do not require an exact formulation of the problem, but rather work
    solely on the input and output values of the problem being solved. This evaluator might not
    be suitable for computationally expensive problems, or mixed-integer problems. This
    evaluator is suitable for many Python-based solvers.
    """

    ### Initialization (no need for decision variables yet)
    # 1. Create a math parser with parser type 'evaluator_type'. Defaults to 'polars'.
    # 2. Check for any constants in the definition of the problem. Replace the constants, if they exist,
    # with their numerical values in all the function expressions found in problem.
    # 3. Parse the function expressions into a dataframe.

    ### Evaluating (we have decision variables to evaluate problem)
    # 1. Evaluate the extra functions (if any) in the dataframe with the decision variables. Store the results
    # in new columns of the dataframe.
    # 2. Evaluate the objective functions based on the decision variables and the extra function values (if any).
    # Store the results in the dataframe in their own columns.
    # 3. Evaluate the constraints (if any) based on the decision variables and extra function values (if any).
    # Store the results in the dataframe in their own columns.
    # 4. Evaluate the scalarization functions (if any) based on the objective function values and extra function
    # values (if any). Store the results in the dataframe in their own columns.
    # 5. Return a polars dataframe with the results (decision variables, objective function values, constraint
    # values, and scalarization function values).
    # 6. End.

    def __init__(self, problem: Problem, evaluator_mode: PolarsEvaluatorModesEnum = PolarsEvaluatorModesEnum.variables):
        """Create a Polars-based evaluator for a multiobjective optimization problem.

        By default, the evaluator expects a set of decision variables to
        evaluate the given problem. However, if the problem is purely based on
        data (e.g., it represents an approximation of a Pareto optimal front),
        then the evaluator should be run in 'discrete' mode instead. In this
        mode, it will return the whole problem with all of its objectives,
        constraints, and scalarization functions evaluated with the current data
        representing the problem.

        Args:
            problem (Problem): The problem as a pydantic 'Problem' data class.
            evaluator_mode (PolarsEvaluatorModesEnum): The mode of evaluator used to parse the
                problem into a format that can be evaluated. Default 'variables'.

        Raises:
            PolarsEvaluatorError: the given 'evaluator_mode' is not supported.
        """
        # Create a MathParser of type 'evaluator_type'.
        if evaluator_mode not in PolarsEvaluatorModesEnum:
            msg = (
                f"The provided 'evaluator_mode' '{evaluator_mode}' is not supported."
                f" Must be one of {PolarsEvaluatorModesEnum}."
            )
            raise PolarsEvaluatorError(msg)

        self.evaluator_mode = evaluator_mode

        self.problem = problem
        # Gather any constants of the problem definition.
        self.problem_constants = problem.constants
        # Gather the objective functions. In 'mixed' mode, only analytical and
        # data-based objectives are handled by this evaluator.
        if evaluator_mode == PolarsEvaluatorModesEnum.mixed:
            self.problem_objectives = [
                objective
                for objective in problem.objectives
                if objective.objective_type in [ObjectiveTypeEnum.analytical, ObjectiveTypeEnum.data_based]
            ]
        else:
            self.problem_objectives = problem.objectives
        # Gather any constraints
        self.problem_constraints = problem.constraints
        # Gather any extra functions
        self.problem_extra = problem.extra_funcs
        # Gather any scalarization functions
        self.problem_scalarization = problem.scalarization_funcs
        # Gather the decision variable symbols defined in the problem
        self.problem_variable_symbols = [var.symbol for var in problem.variables]
        # The discrete definition of (some) objectives
        self.discrete_representation = problem.discrete_representation

        # The below 'expressions' are lists of tuples with symbol and expression pairs, as (symbol, expression).
        # These must be defined in a specialized initialization step, see `_polars_init` below.
        # Symbol and expression pairs of the objective functions
        self.objective_expressions = None
        # Symbol and expression pairs of any constraints
        self.constraint_expressions = None
        # Symbol and expression pairs of any extra functions
        self.extra_expressions = None
        # Symbol and expression pairs of any scalarization functions
        self.scalarization_expressions = None
        # Store TensorConstants in a dict
        self.tensor_constants = None

        # Note: `self.parser` is assumed to be set before continuing the initialization.
        self.parser = MathParser()
        self._polars_init()

        # Note, when calling an evaluate method, it is assumed the problem has been fully parsed.
        if self.evaluator_mode in [PolarsEvaluatorModesEnum.variables, PolarsEvaluatorModesEnum.mixed]:
            self.evaluate = self._polars_evaluate
            self.evaluate_flat = self._polars_evaluate_flat
        elif self.evaluator_mode == PolarsEvaluatorModesEnum.discrete:
            self.evaluate = self._from_discrete_data
        else:
            msg = (
                f"Provided 'evaluator_mode' {evaluator_mode} not supported. Must be one of {PolarsEvaluatorModesEnum}."
            )
            # Bug fix: this message used to be constructed but never raised, which
            # silently left the evaluator without an 'evaluate' attribute. The branch
            # is unreachable in practice (the mode is validated above), but it is
            # kept as a defensive guard.
            raise PolarsEvaluatorError(msg)

    def _polars_init(self):  # noqa: C901, PLR0912
        """Initialization of the evaluator for parser type 'polars'."""
        # If any constants are defined in problem, replace their symbol with the defined numerical
        # value in all the function expressions found in the Problem.
        if self.problem_constants is not None:
            # Objectives are always defined, cannot be None
            parsed_obj_funcs = {}
            for obj in self.problem_objectives:
                if obj.objective_type == ObjectiveTypeEnum.analytical:
                    # if analytical proceed with replacing the symbols.
                    tmp = obj.func

                    # replace regular constants, skip TensorConstants
                    for c in self.problem_constants:
                        if isinstance(c, Constant):
                            tmp = replace_str(tmp, c.symbol, c.value)

                    parsed_obj_funcs[f"{obj.symbol}"] = tmp

                elif obj.objective_type == ObjectiveTypeEnum.data_based:
                    # data-based objective; no expression to parse, resolved from data later
                    parsed_obj_funcs[f"{obj.symbol}"] = None
                else:
                    msg = (
                        f"Incorrect objective-type {obj.objective_type} encountered. "
                        f"Must be one of {ObjectiveTypeEnum}"
                    )
                    raise PolarsEvaluatorError(msg)

            # Do the same for any constraint expressions as well.
            if self.problem_constraints is not None:
                parsed_cons_funcs: dict | None = {}
                for con in self.problem_constraints:
                    tmp = con.func

                    # replace regular constants, skip TensorConstants
                    for c in self.problem_constants:
                        if isinstance(c, Constant):
                            tmp = replace_str(tmp, c.symbol, c.value)

                    parsed_cons_funcs[f"{con.symbol}"] = tmp
            else:
                parsed_cons_funcs = None

            # Do the same for any extra functions
            parsed_extra_funcs: dict | None = {}
            if self.problem_extra is not None:
                for extra in self.problem_extra:
                    tmp = extra.func

                    # replace regular constants, skip TensorConstants
                    for c in self.problem_constants:
                        if isinstance(c, Constant):
                            tmp = replace_str(tmp, c.symbol, c.value)

                    parsed_extra_funcs[f"{extra.symbol}"] = tmp
            else:
                parsed_extra_funcs = None

            # Do the same for any scalarization functions
            parsed_scal_funcs: dict | None = {}
            if self.problem_scalarization is not None:
                for scal in self.problem_scalarization:
                    tmp = scal.func

                    # replace regular constants, skip TensorConstants
                    for c in self.problem_constants:
                        if isinstance(c, Constant):
                            tmp = replace_str(tmp, c.symbol, c.value)

                    parsed_scal_funcs[f"{scal.symbol}"] = tmp
            else:
                parsed_scal_funcs = None

            # Check for TensorConstants; these are stored as numpy arrays and
            # broadcast into the dataframe at evaluation time.
            for c in self.problem_constants:
                if isinstance(c, TensorConstant):
                    if self.tensor_constants is None:
                        self.tensor_constants = {}
                    self.tensor_constants[c.symbol] = np.array(c.get_values())
        else:
            # no constants defined, just collect all expressions as they are
            parsed_obj_funcs = {f"{objective.symbol}": objective.func for objective in self.problem_objectives}

            if self.problem_constraints is not None:
                parsed_cons_funcs = {f"{constraint.symbol}": constraint.func for constraint in self.problem_constraints}
            else:
                parsed_cons_funcs = None

            if self.problem_extra is not None:
                parsed_extra_funcs = {f"{extra.symbol}": extra.func for extra in self.problem_extra}
            else:
                parsed_extra_funcs = None

            if self.problem_scalarization is not None:
                parsed_scal_funcs = {f"{scal.symbol}": scal.func for scal in self.problem_scalarization}
            else:
                parsed_scal_funcs = None

        # Parse all functions into expressions. These are stored as tuples, as (symbol, parsed expression)
        # parse objectives
        # If no expression is given (data-based objective), then the expression is set to be 'None'
        self.objective_expressions = [
            (symbol, self.parser.parse(expression)) if expression is not None else (symbol, None)
            for symbol, expression in parsed_obj_funcs.items()
        ]

        # parse constraints, if any
        # if a constraint is simulator or surrogate based (expression is None), set the "parsed" expression as None
        if parsed_cons_funcs is not None:
            self.constraint_expressions = [
                (symbol, self.parser.parse(expression)) if expression is not None else (symbol, None)
                for symbol, expression in parsed_cons_funcs.items()
            ]
        else:
            self.constraint_expressions = None

        # parse extra functions, if any
        # if an extra function is simulator or surrogate based (expression is None), set the "parsed" expression as None
        if parsed_extra_funcs is not None:
            self.extra_expressions = [
                (symbol, self.parser.parse(expression)) if expression is not None else (symbol, None)
                for symbol, expression in parsed_extra_funcs.items()
            ]
        else:
            self.extra_expressions = None

        # parse scalarization functions, if any
        if parsed_scal_funcs is not None:
            self.scalarization_expressions = [
                (symbol, self.parser.parse(expression)) for symbol, expression in parsed_scal_funcs.items()
            ]
        else:
            self.scalarization_expressions = None

        # store the symbol and min or max multiplier as well (symbol, min/max multiplier [1 | -1])
        # NOTE: the attribute name carries a legacy misspelling ('mix' for 'min');
        # it is kept as-is for backward compatibility with existing callers.
        self.objective_mix_max_mult = [
            (objective.symbol, -1 if objective.maximize else 1) for objective in self.problem_objectives
        ]

        # create dataframe with the discrete representation, if any exists
        if self.discrete_representation is not None:
            self.discrete_df = pl.DataFrame(
                {**self.discrete_representation.variable_values, **self.discrete_representation.objective_values}
            )
        else:
            self.discrete_df = None

    def _polars_evaluate(
        self,
        xs: pl.DataFrame | dict[str, list[float | int | bool]],
    ) -> pl.DataFrame:
        """Evaluate the problem with the given decision variable values utilizing a polars dataframe.

        Args:
            xs (pl.DataFrame | dict[str, list[float | int | bool]]): a Polars dataframe or
                dict with the decision variable symbols as the columns (keys)
                followed by the corresponding decision variable values stored in
                an array (list). The symbols must match the symbols defined for
                the decision variables defined in the `Problem` being solved.
                Each column (list) in the dataframe (dict) should contain the same number of values.

        Returns:
            pl.DataFrame: the polars dataframe with the computed results.

        Note:
            At least `self.objective_expressions` must be defined before calling this method.
        """
        # An aggregate dataframe to store intermediate evaluation results.
        agg_df = pl.DataFrame(
            xs,
            schema=[
                (var.symbol, pl.Float64 if isinstance(var, Variable) else pl.Array(pl.Float64, tuple(var.shape)))
                for var in self.problem.variables
            ],
        )  # need to make sure to provide schema for tensor variables of type Array

        # Deal with TensorConstants: broadcast each constant tensor to every row of the frame.
        if self.tensor_constants is not None:
            for tc_symbol in self.tensor_constants:
                agg_df = agg_df.with_columns(
                    pl.Series(np.array(agg_df.height * [self.tensor_constants[tc_symbol]])).alias(tc_symbol)
                )

        # Evaluate any extra functions and put the results in the aggregate dataframe.
        # If an extra function is simulator or surrogate based (expression None), skip it here
        if self.extra_expressions is not None:
            for symbol, expr in self.extra_expressions:
                if expr is not None:
                    # expression given
                    extra_column = agg_df.select(expr.alias(symbol))
                    agg_df = agg_df.hstack(extra_column)

        # Evaluate the objective functions and put the results in the aggregate dataframe.
        for symbol, expr in self.objective_expressions:
            if expr is not None:
                # expression given
                obj_col = agg_df.select(expr.alias(symbol))
                agg_df = agg_df.hstack(obj_col)
            else:
                # expr is None and there are no simulator or surrogate based objectives,
                # therefore we must get the objective function's value some other way, usually from data
                obj_col = find_closest_points(agg_df, self.discrete_df, self.problem_variable_symbols, symbol)
                agg_df = agg_df.hstack(obj_col)

        # Evaluate the minimization form of the objective functions
        # Note that the column name of these should be 'the objective function's symbol'_min
        # e.g., 'f_1' -> 'f_1_min'
        min_obj_columns = agg_df.select(
            *[
                (min_max_mult * pl.col(f"{symbol}")).alias(f"{symbol}_min")
                for symbol, min_max_mult in self.objective_mix_max_mult
            ]
        )
        agg_df = agg_df.hstack(min_obj_columns)

        # Evaluate any constraints and put the results in the aggregate dataframe
        # If a constraint is simulator or surrogate based (expression None), skip it here
        if self.constraint_expressions is not None:
            for symbol, expr in self.constraint_expressions:
                if expr is not None:
                    # expression given
                    cons_columns = agg_df.select(expr.alias(symbol))
                    agg_df = agg_df.hstack(cons_columns)

        # Evaluate any scalarization functions and put the result in the aggregate dataframe
        if self.scalarization_expressions is not None:
            scal_columns = agg_df.select(*[expr.alias(symbol) for symbol, expr in self.scalarization_expressions])
            agg_df = agg_df.hstack(scal_columns)

        # return the dataframe and let the solver figure it out
        return agg_df

    def _polars_evaluate_flat(
        self,
        xs: pl.DataFrame | dict[str, list[float | int | bool]],
    ) -> pl.DataFrame:
        """Evaluate the problem with flattened variables.

        Args:
            xs (pl.DataFrame | dict[str, list[float | int | bool]]): a polars dataframe
                or dict with flattened variables.
                E.g., if the original problem has a tensor variable 'X' with shape (2,2),
                then the input is expected to have entries with columns (keys) 'X_1_1', 'X_1_2',
                'X_2_1', and 'X_2_2'. The input is rebuilt and passed to
                `self._evaluate`.

        Note:
            Each flattened variable is assumed to contain the same number of samples.
            This means that if the entry 'X_1_1' of `xs` is, for example
            `[1,2,3]`, this means that 'X_1_1' and all the other flattened
            variables have three samples. This means also that the original
            problem will be evaluated with a tensor variable with shape (2,2)
            and three samples,
            e.g., 'X=[[[1, 1], [1,1]], [[2, 2], [2, 2]], [[3, 3], [3, 3]]]'.

        Returns:
            pl.DataFrame: a dataframe with the original problem's evaluated functions.
        """
        # Assume all variables have the same number of samples
        if isinstance(xs, dict):
            xs = pl.DataFrame(xs)

        unflattened_xs = pl.DataFrame()

        # iterate over the variables of the problem
        for var in self.problem.variables:
            if isinstance(var, TensorVariable):
                # construct the tensor variable by collecting all 'symbol_*' columns
                # into one Array column with the variable's original shape
                unflattened_xs = unflattened_xs.with_columns(
                    xs.select(pl.concat_arr(f"^{var.symbol}_.*$").alias(var.symbol).reshape((1, *var.shape)))
                )

            else:
                # else, proceed normally
                unflattened_xs = unflattened_xs.with_columns(xs[var.symbol])

        # return result of regular evaluate
        return self.evaluate(unflattened_xs)

    def _from_discrete_data(self) -> pl.DataFrame:
        """Evaluates the problem based on its discrete representation only.

        Assumes that all the objective functions in the problem are of type 'data-based'.
        In this case, the problem is evaluated based on its current discrete representation. Therefore,
        no decision variable values are expected.

        Returns:
            pl.DataFrame: a polars dataframe with the evaluation results.
        """
        agg_df = self.discrete_df.clone()

        # Evaluate any extra functions and put the results in the aggregate dataframe.
        if self.extra_expressions is not None:
            extra_columns = agg_df.select(*[expr.alias(symbol) for symbol, expr in self.extra_expressions])
            agg_df = agg_df.hstack(extra_columns)

        # Evaluate the minimization form of the objective functions
        # Note that the column name of these should be 'the objective function's symbol'_min
        # e.g., 'f_1' -> 'f_1_min'
        min_obj_columns = agg_df.select(
            *[
                (min_max_mult * pl.col(f"{symbol}")).alias(f"{symbol}_min")
                for symbol, min_max_mult in self.objective_mix_max_mult
            ]
        )

        agg_df = agg_df.hstack(min_obj_columns)

        # Evaluate any constraints and put the results in the aggregate dataframe
        if self.constraint_expressions is not None:
            cons_columns = agg_df.select(*[expr.alias(symbol) for symbol, expr in self.constraint_expressions])
            agg_df = agg_df.hstack(cons_columns)

        # Evaluate any scalarization functions and put the result in the aggregate dataframe
        if self.scalarization_expressions is not None:
            scal_columns = agg_df.select(*[expr.alias(symbol) for symbol, expr in self.scalarization_expressions])
            agg_df = agg_df.hstack(scal_columns)

        # no more processing needed, it is assumed a solver will handle the rest
        return agg_df
518
+
519
+
520
def find_closest_points(
    xs: pl.DataFrame, discrete_df: pl.DataFrame, variable_symbols: list[str], objective_symbol: str
) -> pl.DataFrame:
    """Finds the closest points between the variable columns in xs and discrete_df.

    For each row in xs, compares the `variable_symbols` columns and finds the closest
    point in `discrete_df`. Returns the objective value in the `objective_symbol` column in
    `discrete_df` for each variable defined in `xs`, where the objective value
    corresponds to the closest point of each variable in `xs` compared to `discrete_df`.

    Both `xs` and `discrete_df` must have the columns `variable_symbols`. `discrete_df` must
    also have the column `objective_symbol`.

    Args:
        xs (pl.DataFrame): a polars dataframe with the variable values we are
            interested in finding the closest corresponding variable values in
            `discrete_df`.
        discrete_df (pl.DataFrame): a polars dataframe to compare the rows in `xs` to.
        variable_symbols (list[str]): the names of the columns with decision variable values.
        objective_symbol (str): the name of the column in `discrete_df` that has the objective function values.

    Returns:
        pl.DataFrame: a dataframe with the column `objective_symbol` with the
            objective function value that corresponds to each decision variable
            vector in `xs`.
    """
    # Note: the annotation of `objective_symbol` was corrected from `list[str]` to
    # `str`; the function has always treated it as a single column name.
    xs_vars_only = xs[variable_symbols]

    results = []

    for row in xs_vars_only.rows(named=True):
        # Euclidean distance from this row to every row of the discrete representation.
        distance_expr = (
            sum((pl.col(var_symbol) - row[var_symbol]) ** 2 for var_symbol in variable_symbols).sqrt().alias("distance")
        )

        combined_df = discrete_df.with_columns(distance_expr)

        # the single nearest discrete point decides the objective value
        closest = combined_df.sort("distance").head(1)

        results.append(closest[f"{objective_symbol}"][0])

    return pl.DataFrame({f"{objective_symbol}": results})
@@ -0,0 +1,18 @@
1
"""Export of the external module."""

from .core import ProviderParams
from .pymoo_provider import PymooProblemParams, PymooProvider, create_pymoo_problem
from .runtime import get_registry, get_resolver, register_provider, supported_schemes

# register default providers here
register_provider("pymoo", PymooProvider())

# Bug fix: 'PymooProvider' was imported (and registered above) but missing from
# __all__, so it was not exported via 'from desdeo.problem.external import *'.
__all__ = [
    "ProviderParams",
    "PymooProblemParams",
    "PymooProvider",
    "create_pymoo_problem",
    "get_registry",
    "get_resolver",
    "register_provider",
    "supported_schemes",
]