desdeo 1.2__py3-none-any.whl → 2.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- desdeo/__init__.py +8 -8
- desdeo/adm/ADMAfsar.py +551 -0
- desdeo/adm/ADMChen.py +414 -0
- desdeo/adm/BaseADM.py +119 -0
- desdeo/adm/__init__.py +11 -0
- desdeo/api/README.md +73 -0
- desdeo/api/__init__.py +15 -0
- desdeo/api/app.py +50 -0
- desdeo/api/config.py +90 -0
- desdeo/api/config.toml +64 -0
- desdeo/api/db.py +27 -0
- desdeo/api/db_init.py +85 -0
- desdeo/api/db_models.py +164 -0
- desdeo/api/malaga_db_init.py +27 -0
- desdeo/api/models/__init__.py +266 -0
- desdeo/api/models/archive.py +23 -0
- desdeo/api/models/emo.py +128 -0
- desdeo/api/models/enautilus.py +69 -0
- desdeo/api/models/gdm/gdm_aggregate.py +139 -0
- desdeo/api/models/gdm/gdm_base.py +69 -0
- desdeo/api/models/gdm/gdm_score_bands.py +114 -0
- desdeo/api/models/gdm/gnimbus.py +138 -0
- desdeo/api/models/generic.py +104 -0
- desdeo/api/models/generic_states.py +401 -0
- desdeo/api/models/nimbus.py +158 -0
- desdeo/api/models/preference.py +128 -0
- desdeo/api/models/problem.py +717 -0
- desdeo/api/models/reference_point_method.py +18 -0
- desdeo/api/models/session.py +49 -0
- desdeo/api/models/state.py +463 -0
- desdeo/api/models/user.py +52 -0
- desdeo/api/models/utopia.py +25 -0
- desdeo/api/routers/_EMO.backup +309 -0
- desdeo/api/routers/_NAUTILUS.py +245 -0
- desdeo/api/routers/_NAUTILUS_navigator.py +233 -0
- desdeo/api/routers/_NIMBUS.py +765 -0
- desdeo/api/routers/__init__.py +5 -0
- desdeo/api/routers/emo.py +497 -0
- desdeo/api/routers/enautilus.py +237 -0
- desdeo/api/routers/gdm/gdm_aggregate.py +234 -0
- desdeo/api/routers/gdm/gdm_base.py +420 -0
- desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_manager.py +398 -0
- desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_routers.py +377 -0
- desdeo/api/routers/gdm/gnimbus/gnimbus_manager.py +698 -0
- desdeo/api/routers/gdm/gnimbus/gnimbus_routers.py +591 -0
- desdeo/api/routers/generic.py +233 -0
- desdeo/api/routers/nimbus.py +705 -0
- desdeo/api/routers/problem.py +307 -0
- desdeo/api/routers/reference_point_method.py +93 -0
- desdeo/api/routers/session.py +100 -0
- desdeo/api/routers/test.py +16 -0
- desdeo/api/routers/user_authentication.py +520 -0
- desdeo/api/routers/utils.py +187 -0
- desdeo/api/routers/utopia.py +230 -0
- desdeo/api/schema.py +100 -0
- desdeo/api/tests/__init__.py +0 -0
- desdeo/api/tests/conftest.py +151 -0
- desdeo/api/tests/test_enautilus.py +330 -0
- desdeo/api/tests/test_models.py +1179 -0
- desdeo/api/tests/test_routes.py +1075 -0
- desdeo/api/utils/_database.py +263 -0
- desdeo/api/utils/_logger.py +29 -0
- desdeo/api/utils/database.py +36 -0
- desdeo/api/utils/emo_database.py +40 -0
- desdeo/core.py +34 -0
- desdeo/emo/__init__.py +159 -0
- desdeo/emo/hooks/archivers.py +188 -0
- desdeo/emo/methods/EAs.py +541 -0
- desdeo/emo/methods/__init__.py +0 -0
- desdeo/emo/methods/bases.py +12 -0
- desdeo/emo/methods/templates.py +111 -0
- desdeo/emo/operators/__init__.py +1 -0
- desdeo/emo/operators/crossover.py +1282 -0
- desdeo/emo/operators/evaluator.py +114 -0
- desdeo/emo/operators/generator.py +459 -0
- desdeo/emo/operators/mutation.py +1224 -0
- desdeo/emo/operators/scalar_selection.py +202 -0
- desdeo/emo/operators/selection.py +1778 -0
- desdeo/emo/operators/termination.py +286 -0
- desdeo/emo/options/__init__.py +108 -0
- desdeo/emo/options/algorithms.py +435 -0
- desdeo/emo/options/crossover.py +164 -0
- desdeo/emo/options/generator.py +131 -0
- desdeo/emo/options/mutation.py +260 -0
- desdeo/emo/options/repair.py +61 -0
- desdeo/emo/options/scalar_selection.py +66 -0
- desdeo/emo/options/selection.py +127 -0
- desdeo/emo/options/templates.py +383 -0
- desdeo/emo/options/termination.py +143 -0
- desdeo/explanations/__init__.py +6 -0
- desdeo/explanations/explainer.py +100 -0
- desdeo/explanations/utils.py +90 -0
- desdeo/gdm/__init__.py +22 -0
- desdeo/gdm/gdmtools.py +45 -0
- desdeo/gdm/score_bands.py +114 -0
- desdeo/gdm/voting_rules.py +50 -0
- desdeo/mcdm/__init__.py +41 -0
- desdeo/mcdm/enautilus.py +338 -0
- desdeo/mcdm/gnimbus.py +484 -0
- desdeo/mcdm/nautili.py +345 -0
- desdeo/mcdm/nautilus.py +477 -0
- desdeo/mcdm/nautilus_navigator.py +656 -0
- desdeo/mcdm/nimbus.py +417 -0
- desdeo/mcdm/pareto_navigator.py +269 -0
- desdeo/mcdm/reference_point_method.py +186 -0
- desdeo/problem/__init__.py +83 -0
- desdeo/problem/evaluator.py +561 -0
- desdeo/problem/external/__init__.py +18 -0
- desdeo/problem/external/core.py +356 -0
- desdeo/problem/external/pymoo_provider.py +266 -0
- desdeo/problem/external/runtime.py +44 -0
- desdeo/problem/gurobipy_evaluator.py +562 -0
- desdeo/problem/infix_parser.py +341 -0
- desdeo/problem/json_parser.py +944 -0
- desdeo/problem/pyomo_evaluator.py +487 -0
- desdeo/problem/schema.py +1829 -0
- desdeo/problem/simulator_evaluator.py +348 -0
- desdeo/problem/sympy_evaluator.py +244 -0
- desdeo/problem/testproblems/__init__.py +88 -0
- desdeo/problem/testproblems/benchmarks_server.py +120 -0
- desdeo/problem/testproblems/binh_and_korn_problem.py +88 -0
- desdeo/problem/testproblems/cake_problem.py +185 -0
- desdeo/problem/testproblems/dmitry_forest_problem_discrete.py +71 -0
- desdeo/problem/testproblems/dtlz2_problem.py +102 -0
- desdeo/problem/testproblems/forest_problem.py +283 -0
- desdeo/problem/testproblems/knapsack_problem.py +163 -0
- desdeo/problem/testproblems/mcwb_problem.py +831 -0
- desdeo/problem/testproblems/mixed_variable_dimenrions_problem.py +83 -0
- desdeo/problem/testproblems/momip_problem.py +172 -0
- desdeo/problem/testproblems/multi_valued_constraints.py +119 -0
- desdeo/problem/testproblems/nimbus_problem.py +143 -0
- desdeo/problem/testproblems/pareto_navigator_problem.py +89 -0
- desdeo/problem/testproblems/re_problem.py +492 -0
- desdeo/problem/testproblems/river_pollution_problems.py +440 -0
- desdeo/problem/testproblems/rocket_injector_design_problem.py +140 -0
- desdeo/problem/testproblems/simple_problem.py +351 -0
- desdeo/problem/testproblems/simulator_problem.py +92 -0
- desdeo/problem/testproblems/single_objective.py +289 -0
- desdeo/problem/testproblems/spanish_sustainability_problem.py +945 -0
- desdeo/problem/testproblems/zdt_problem.py +274 -0
- desdeo/problem/utils.py +245 -0
- desdeo/tools/GenerateReferencePoints.py +181 -0
- desdeo/tools/__init__.py +120 -0
- desdeo/tools/desc_gen.py +22 -0
- desdeo/tools/generics.py +165 -0
- desdeo/tools/group_scalarization.py +3090 -0
- desdeo/tools/gurobipy_solver_interfaces.py +258 -0
- desdeo/tools/indicators_binary.py +117 -0
- desdeo/tools/indicators_unary.py +362 -0
- desdeo/tools/interaction_schema.py +38 -0
- desdeo/tools/intersection.py +54 -0
- desdeo/tools/iterative_pareto_representer.py +99 -0
- desdeo/tools/message.py +265 -0
- desdeo/tools/ng_solver_interfaces.py +199 -0
- desdeo/tools/non_dominated_sorting.py +134 -0
- desdeo/tools/patterns.py +283 -0
- desdeo/tools/proximal_solver.py +99 -0
- desdeo/tools/pyomo_solver_interfaces.py +477 -0
- desdeo/tools/reference_vectors.py +229 -0
- desdeo/tools/scalarization.py +2065 -0
- desdeo/tools/scipy_solver_interfaces.py +454 -0
- desdeo/tools/score_bands.py +627 -0
- desdeo/tools/utils.py +388 -0
- desdeo/tools/visualizations.py +67 -0
- desdeo/utopia_stuff/__init__.py +0 -0
- desdeo/utopia_stuff/data/1.json +15 -0
- desdeo/utopia_stuff/data/2.json +13 -0
- desdeo/utopia_stuff/data/3.json +15 -0
- desdeo/utopia_stuff/data/4.json +17 -0
- desdeo/utopia_stuff/data/5.json +15 -0
- desdeo/utopia_stuff/from_json.py +40 -0
- desdeo/utopia_stuff/reinit_user.py +38 -0
- desdeo/utopia_stuff/utopia_db_init.py +212 -0
- desdeo/utopia_stuff/utopia_problem.py +403 -0
- desdeo/utopia_stuff/utopia_problem_old.py +415 -0
- desdeo/utopia_stuff/utopia_reference_solutions.py +79 -0
- desdeo-2.1.0.dist-info/METADATA +186 -0
- desdeo-2.1.0.dist-info/RECORD +180 -0
- {desdeo-1.2.dist-info → desdeo-2.1.0.dist-info}/WHEEL +1 -1
- desdeo-2.1.0.dist-info/licenses/LICENSE +21 -0
- desdeo-1.2.dist-info/METADATA +0 -16
- desdeo-1.2.dist-info/RECORD +0 -4
|
@@ -0,0 +1,383 @@
|
|
|
1
|
+
"""JSON Schema for template options."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Callable
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from functools import partial
|
|
8
|
+
from typing import Literal
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
|
|
12
|
+
from desdeo.emo.hooks.archivers import NonDominatedArchive
|
|
13
|
+
from desdeo.emo.methods.templates import EMOResult, template1, template2
|
|
14
|
+
from desdeo.emo.operators.evaluator import EMOEvaluator
|
|
15
|
+
from desdeo.emo.options.crossover import (
|
|
16
|
+
CrossoverOptions,
|
|
17
|
+
crossover_constructor,
|
|
18
|
+
)
|
|
19
|
+
from desdeo.emo.options.generator import (
|
|
20
|
+
GeneratorOptions,
|
|
21
|
+
generator_constructor,
|
|
22
|
+
)
|
|
23
|
+
from desdeo.problem import Problem
|
|
24
|
+
from desdeo.tools.patterns import Publisher
|
|
25
|
+
from desdeo.tools.scalarization import add_desirability_funcs, add_iopis_funcs
|
|
26
|
+
|
|
27
|
+
from .mutation import (
|
|
28
|
+
MutationOptions,
|
|
29
|
+
mutation_constructor,
|
|
30
|
+
)
|
|
31
|
+
from .repair import NoRepairOptions, RepairOptions, repair_constructor
|
|
32
|
+
from .scalar_selection import (
|
|
33
|
+
ScalarSelectionOptions,
|
|
34
|
+
scalar_selector_constructor,
|
|
35
|
+
)
|
|
36
|
+
from .selection import ReferenceVectorOptions, SelectorOptions, selection_constructor
|
|
37
|
+
from .termination import (
|
|
38
|
+
TerminatorOptions,
|
|
39
|
+
terminator_constructor,
|
|
40
|
+
)
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class InvalidTemplateError(Exception):
    """Exception raised for invalid template configurations.

    Raised when the requested combination of template, selection operator,
    and preference-handling method cannot work together.
    """
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class BaseTemplateOptions(BaseModel):
    """Base class for template options."""

    # With this config, the bare-string docstrings below each field become the
    # field descriptions in the generated JSON schema.
    model_config = {"use_attribute_docstrings": True}

    crossover: CrossoverOptions
    """The crossover operator options."""
    mutation: MutationOptions
    """The mutation operator options."""
    selection: SelectorOptions
    """The selection operator options."""
    termination: TerminatorOptions
    """The termination operator options."""
    generator: GeneratorOptions
    """The population generator options."""
    repair: RepairOptions = Field(default=NoRepairOptions())
    """The repair operator options."""
    use_archive: bool = Field(default=True)
    """Whether to use an archive."""
    seed: int = Field(default=0)
    """The seed for random number generation."""
    verbosity: int = Field(default=2)
    """The verbosity level of the operators."""
    algorithm_name: str
    """The unique name of the algorithm."""
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class Template1Options(BaseTemplateOptions):
    """Options for template 1.

    Template 1 is used by methods such as NSGA-III and RVEA. See
    [template1][desdeo.emo.methods.templates.template1] for
    more details.
    """

    # Frozen literal acts as the discriminator for the TemplateOptions union.
    name: Literal["Template1"] = Field(default="Template1", frozen=True, description="The name of the template.")
    """The name of the template."""
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
class Template2Options(BaseTemplateOptions):
    """Options for template 2.

    Template 2 is used by methods such as IBEA. See
    [template2][desdeo.emo.methods.templates.template2] for
    more details.
    """

    # Frozen literal acts as the discriminator for the TemplateOptions union.
    name: Literal["Template2"] = Field(default="Template2", frozen=True, description="The name of the template.")
    """The name of the template."""
    mate_selection: ScalarSelectionOptions = Field(description="The mate selection operator options.")
    """The mate selection operator options."""
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
TemplateOptions = Template1Options | Template2Options
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class ReferencePointOptions(BaseModel):
    """Options for providing a reference point for an EA."""

    # Frozen literal acts as the discriminator for the PreferenceOptions union.
    name: Literal["reference_point"] = Field(
        default="reference_point", frozen=True, description="The name of the reference point option."
    )
    """The name of the reference point option."""
    preference: dict[str, float] = Field(
        description="The reference point as a dictionary with objective function symbols as the keys."
    )
    """The reference point as a dictionary with objective function symbols as the keys."""
    method: Literal["Hakanen", "IOPIS"] = Field(
        default="Hakanen", description="The method for handling the reference point."
    )
    """The method for handling the reference point."""
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
class DesirableRangesOptions(BaseModel):
    """Options for providing desirable ranges for an EA."""

    # Frozen literal acts as the discriminator for the PreferenceOptions union.
    name: Literal["preferred_ranges"] = Field(
        default="preferred_ranges", frozen=True, description="The name of the preferred ranges option."
    )
    """The name of the preferred ranges option."""
    aspiration_levels: dict[str, float] = Field(
        description="The aspiration levels as a dictionary with objective function symbols as the keys."
    )
    """The aspiration levels as a dictionary with objective function symbols as the keys."""
    reservation_levels: dict[str, float] = Field(
        description="The reservation levels as a dictionary with objective function symbols as the keys."
    )
    """The reservation levels as a dictionary with objective function symbols as the keys."""
    method: Literal["Hakanen", "DF transformation"] = Field(
        default="Hakanen", description="The method for handling the desirable ranges."
    )
    """The method for handling the desirable ranges."""
    desirability_levels: tuple[float, float] = Field(
        default=(0.9, 0.1),
        description=(
            "The desirability levels as a tuple (high, low). Used if method is DF transformation."
            " If None, default levels (0.9, 0.1) are used."
        ),
    )
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
class PreferredSolutionsOptions(BaseModel):
    """Options for providing preferred solutions for an EA."""

    # Frozen literal acts as the discriminator for the PreferenceOptions union.
    name: Literal["preferred_solutions"] = Field(
        default="preferred_solutions", frozen=True, description="The name of the preferred solutions option."
    )
    """The name of the preferred solutions option."""
    preference: dict[str, list[float]] = Field(
        description="The preferred solutions as a dictionary with objective function symbols as the keys."
    )
    """The preferred solutions as a dictionary with objective function symbols as the keys."""
    method: Literal["Hakanen"] = Field(
        default="Hakanen", description="The method for handling the preferred solutions."
    )
    """The method for handling the preferred solutions."""
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
class NonPreferredSolutionsOptions(BaseModel):
    """Options for providing non-preferred solutions for an EA."""

    # Frozen literal acts as the discriminator for the PreferenceOptions union.
    name: Literal["non_preferred_solutions"] = Field(
        default="non_preferred_solutions", frozen=True, description="The name of the non-preferred solutions option."
    )
    """The name of the non-preferred solutions option."""
    preference: dict[str, list[float]] = Field(
        description="The non-preferred solutions as a dictionary with objective function symbols as the keys."
    )
    """The non-preferred solutions as a dictionary with objective function symbols as the keys."""
    method: Literal["Hakanen"] = Field(
        default="Hakanen", description="The method for handling the non-preferred solutions."
    )
    """The method for handling the non-preferred solutions."""
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
PreferenceOptions = (
|
|
182
|
+
ReferencePointOptions | DesirableRangesOptions | PreferredSolutionsOptions | NonPreferredSolutionsOptions
|
|
183
|
+
)
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
class EMOOptions(BaseModel):
    """Options for configuring the EMO algorithm.

    Top-level options object consumed by `emo_constructor`.
    """

    model_config = {"use_attribute_docstrings": True}

    # None means no preference information: the algorithm runs a-priori-free.
    preference: PreferenceOptions | None
    """The preference information for the EMO algorithm."""
    template: TemplateOptions
    """The template options for the EMO algorithm."""
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def preference_handler(
    preference: PreferenceOptions | None, problem: Problem, selection: SelectorOptions
) -> tuple[Problem, SelectorOptions]:
    """Handle the preference options for the EMO algorithm.

    This function modifies the problem and selection operator based on the provided preference options. E.g., if
    the preference method is "Hakanen", the reference vector options of the selection operator are modified to
    include the preference information. If the preference method is "IOPIS" or "DF transformation", the problem is
    modified to include desirability functions or IOPIS functions.

    Args:
        preference (PreferenceOptions | None): The preference options.
        problem (Problem): The optimization problem.
        selection (SelectorOptions): The selection operator options.

    Returns:
        tuple[Problem, SelectorOptions]: The (modified, if necessary) problem and selection operator options.

    Raises:
        InvalidTemplateError: If the preference handling method is incompatible with the selection operator.
    """
    if preference is None:
        # Nothing to do: return inputs unchanged.
        return problem, selection

    if preference.method == "Hakanen":
        # The Hakanen method steers the reference vectors of the selection
        # operator, so the operator must expose `reference_vector_options`.
        if "reference_vector_options" not in type(selection).model_fields:
            raise InvalidTemplateError(
                "Preference handling with Hakanen method requires a selection operator with reference vectors."
            )
        if selection.name == "IBEASelector":  # Technically not needed due to check above, but for shutting up linters
            raise InvalidTemplateError("Preference handling with Hakanen method is not supported for IBEASelector.")
        if selection.reference_vector_options is None:
            reference_vector_options = ReferenceVectorOptions()  # Use default reference vector options
        else:
            reference_vector_options = selection.reference_vector_options
        if isinstance(preference, DesirableRangesOptions):
            # Ranges are expressed per objective as [aspiration, reservation] pairs.
            preference_value = {
                obj.symbol: [preference.aspiration_levels[obj.symbol], preference.reservation_levels[obj.symbol]]
                for obj in problem.objectives
            }
        else:
            preference_value = preference.preference
        # NOTE: mutates the selection options in place; `preference.name` is the
        # discriminator literal, which matches the corresponding field name on
        # ReferenceVectorOptions.
        setattr(reference_vector_options, preference.name, preference_value)
        selection.reference_vector_options = reference_vector_options
        return problem, selection
    if preference.method == "IOPIS":
        # IOPIS: augment the problem with IOPIS scalarization functions.
        iopis_problem, _ = add_iopis_funcs(
            problem=problem,
            reference_point=preference.preference,
        )
        return iopis_problem, selection
    if preference.method == "DF transformation":
        # DF transformation: augment the problem with desirability functions.
        df_problem, _ = add_desirability_funcs(
            problem=problem,
            aspiration_levels=preference.aspiration_levels,
            reservation_levels=preference.reservation_levels,
            desirability_levels={name: preference.desirability_levels for name in preference.aspiration_levels},
            desirability_func="MaoMao",
        )
        return df_problem, selection
    raise InvalidTemplateError(f"Unknown preference handling method: {preference.method}")
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
@dataclass
class ConstructorExtras:
    """Extra information returned by the emo_constructor."""

    problem: Problem
    """New problem generated by the constructor (e.g. to handle preferences via IOPIS). If no new problem is generated,
    the original problem is returned."""
    publisher: Publisher
    """The publisher associated with the current solver."""
    archive: NonDominatedArchive | None
    """The archive associated with the current solver, if any."""
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
def emo_constructor(
    emo_options: EMOOptions, problem: Problem, external_check: Callable[[], bool] | None = None
) -> tuple[Callable[[], EMOResult], ConstructorExtras]:
    """Construct an evolutionary algorithm from the given options.

    Args:
        emo_options (EMOOptions): The options for the EMO algorithm.
        problem (Problem): The optimization problem to solve.
        external_check (Callable[[], bool] | None): A callable that returns True if the algorithm should stop,
            False otherwise. By default, None.

    Returns:
        tuple[Callable[[], EMOResult], ConstructorExtras]: A tuple containing the template function
            and extra information such as the (possibly modified) problem, publisher, and archive. Run the template
            function to execute the algorithm.

    Raises:
        InvalidTemplateError: If the template configuration is invalid.
    """
    publisher = Publisher()

    template = emo_options.template

    # Preference handling may replace the problem (IOPIS / DF transformation)
    # or mutate the selection options in place (Hakanen).
    problem_, selector_options = preference_handler(
        preference=emo_options.preference, problem=problem, selection=template.selection
    )

    evaluator = EMOEvaluator(problem=problem_, publisher=publisher, verbosity=template.verbosity)

    selector = selection_constructor(
        problem=problem_,
        options=selector_options,
        publisher=publisher,
        verbosity=template.verbosity,
        seed=template.seed,
    )

    generator = generator_constructor(
        problem=problem_,
        options=template.generator,
        evaluator=evaluator,
        publisher=publisher,
        verbosity=template.verbosity,
        seed=template.seed,
    )

    crossover = crossover_constructor(
        problem=problem_,
        options=template.crossover,
        publisher=publisher,
        verbosity=template.verbosity,
        seed=template.seed,
    )

    mutation = mutation_constructor(
        problem=problem_,
        options=template.mutation,
        publisher=publisher,
        verbosity=template.verbosity,
        seed=template.seed,
    )

    terminator = terminator_constructor(
        options=template.termination,
        publisher=publisher,
        external_check=external_check,
    )

    repair = repair_constructor(options=template.repair, problem=problem_)

    components = {
        "evaluator": evaluator,
        "generator": generator,
        "crossover": crossover,
        "mutation": mutation,
        "selection": selector,
        "terminator": terminator,
    }

    if template.use_archive:
        archive = NonDominatedArchive(
            problem=problem_,
            publisher=publisher,
        )
        components["archive"] = archive

    if template.name == "Template2":
        # Template 2 (e.g. IBEA) additionally requires a mate-selection operator.
        scalar_selector = scalar_selector_constructor(
            options=template.mate_selection,
            publisher=publisher,
            verbosity=template.verbosity,
            seed=template.seed,
        )
        components["mate_selection"] = scalar_selector

    # Wire every component to the publisher. Plain loops instead of the previous
    # side-effect list comprehensions, which built throwaway lists.
    for component in components.values():
        publisher.auto_subscribe(component)
        publisher.register_topics(component.provided_topics[component.verbosity], component.__class__.__name__)

    consistency = publisher.check_consistency()

    if not consistency[0]:
        raise InvalidTemplateError(f"Inconsistent template configuration. See details:\n {consistency[1]}")
    # The archive is returned via ConstructorExtras; it is not an argument of the
    # template functions, so remove it from the components passed to them.
    archive = components.pop("archive", None)
    template_funcs = {
        "Template1": template1,
        "Template2": template2,
    }

    constructor_extras = ConstructorExtras(problem=problem_, publisher=publisher, archive=archive)

    return (partial(template_funcs[template.name], **components, repair=repair), constructor_extras)
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
"""JSON Schema for termination operator options."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Callable
|
|
6
|
+
from typing import TYPE_CHECKING, Literal
|
|
7
|
+
|
|
8
|
+
from pydantic import BaseModel, Field, model_validator
|
|
9
|
+
|
|
10
|
+
from desdeo.emo.operators.termination import (
|
|
11
|
+
BaseTerminator,
|
|
12
|
+
CompositeTerminator,
|
|
13
|
+
ExternalCheckTerminator,
|
|
14
|
+
MaxEvaluationsTerminator,
|
|
15
|
+
MaxGenerationsTerminator,
|
|
16
|
+
MaxTimeTerminator,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
if TYPE_CHECKING:
|
|
20
|
+
from desdeo.tools.patterns import Publisher
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class MaxGenerationsTerminatorOptions(BaseModel):
    """Options for max generations terminator operator."""

    # Frozen literal acts as the discriminator for the TerminatorOptions union.
    name: Literal["MaxGenerationsTerminator"] = Field(
        default="MaxGenerationsTerminator", frozen=True, description="The name of the termination operator."
    )
    """The name of the termination operator."""
    max_generations: int = Field(default=100, gt=0, description="The maximum number of generations allowed.")
    """The maximum number of generations allowed."""
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class MaxEvaluationsTerminatorOptions(BaseModel):
    """Options for max evaluations terminator operator."""

    # Frozen literal acts as the discriminator for the TerminatorOptions union.
    name: Literal["MaxEvaluationsTerminator"] = Field(
        default="MaxEvaluationsTerminator", frozen=True, description="The name of the termination operator."
    )
    """The name of the termination operator."""
    max_evaluations: int = Field(default=10000, gt=0, description="The maximum number of evaluations allowed.")
    """The maximum number of evaluations allowed."""
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class MaxTimeTerminatorOptions(BaseModel):
    """Options for max time terminator operator."""

    # Frozen literal acts as the discriminator for the TerminatorOptions union.
    name: Literal["MaxTimeTerminator"] = Field(
        default="MaxTimeTerminator", frozen=True, description="The name of the termination operator."
    )
    """The name of the termination operator."""
    max_time: float = Field(default=30.0, gt=0, description="The maximum time allowed (in seconds).")
    """The maximum time allowed (in seconds)."""
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class ExternalCheckTerminatorOptions(BaseModel):
    """Options for external check terminator operator. Note that the check function must be provided separately."""

    # Frozen literal acts as the discriminator for the TerminatorOptions union.
    # The callable itself is passed to `terminator_constructor`, not stored here,
    # because callables are not JSON-serializable.
    name: Literal["ExternalCheckTerminator"] = Field(
        default="ExternalCheckTerminator", frozen=True, description="The name of the termination operator."
    )
    """The name of the termination operator."""
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class CompositeTerminatorOptions(BaseModel):
    """Options for composite terminator operator."""

    # Frozen literal acts as the discriminator for the TerminatorOptions union.
    name: Literal["CompositeTerminator"] = Field(
        default="CompositeTerminator", frozen=True, description="The name of the termination operator."
    )
    """The name of the termination operator."""
    # Note: composites cannot nest (CompositeTerminatorOptions is not in the union).
    terminators: list[
        MaxEvaluationsTerminatorOptions
        | MaxGenerationsTerminatorOptions
        | MaxTimeTerminatorOptions
        | ExternalCheckTerminatorOptions
    ] = Field(default_factory=lambda: [MaxGenerationsTerminatorOptions()], description="List of terminators.")
    """List of terminators."""
    mode: Literal["all", "any"] = Field(default="any", description="Whether to use logical AND or OR.")
    """Whether to use logical AND or OR."""

    @model_validator(mode="after")
    def check_unique_terminator_types(self):
        """Ensure that all terminator types in the composite are unique."""
        types_seen = set()
        for term in self.terminators:
            t = type(term)
            if t in types_seen:
                raise ValueError(f"Duplicate terminator type: {t.__name__}")
            types_seen.add(t)
        return self
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
TerminatorOptions = (
|
|
95
|
+
MaxGenerationsTerminatorOptions
|
|
96
|
+
| MaxEvaluationsTerminatorOptions
|
|
97
|
+
| MaxTimeTerminatorOptions
|
|
98
|
+
| ExternalCheckTerminatorOptions
|
|
99
|
+
| CompositeTerminatorOptions
|
|
100
|
+
)
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def terminator_constructor(
    options: TerminatorOptions, publisher: Publisher, external_check: Callable | None = None
) -> BaseTerminator:
    """Construct a termination operator.

    Args:
        options (TerminatorOptions): Options for the termination operator.
        publisher (Publisher): Publisher instance for the termination operator.
        external_check (Callable | None, optional): External check function for the termination operator.
            Defaults to None. Only required if using ExternalCheckTerminator.

    Raises:
        ValueError: If the options are invalid.
        ValueError: If the external check function is required but not provided.

    Returns:
        BaseTerminator: Instance of the termination operator.
    """
    terminators = {
        "MaxGenerationsTerminator": MaxGenerationsTerminator,
        "MaxEvaluationsTerminator": MaxEvaluationsTerminator,
        "MaxTimeTerminator": MaxTimeTerminator,
        "ExternalCheckTerminator": ExternalCheckTerminator,
        "CompositeTerminator": CompositeTerminator,
    }
    if isinstance(options, CompositeTerminatorOptions):
        # Recurse on the sub-option *models*. The previous implementation dumped
        # the whole composite with `model_dump()` first and recursed on the
        # resulting plain dicts, which have no `model_dump` method and crashed
        # with AttributeError on the recursive call.
        sub_terminators = [
            terminator_constructor(sub_options, publisher, external_check) for sub_options in options.terminators
        ]
        return CompositeTerminator(terminators=sub_terminators, publisher=publisher, mode=options.mode)
    opts: dict = options.model_dump()
    name = opts.pop("name")
    if name == "ExternalCheckTerminator":
        if external_check is None:
            raise ValueError("External check function must be provided for ExternalCheckTerminator.")
        return ExternalCheckTerminator(external_check=external_check, **opts)
    if name in terminators:
        return terminators[name](publisher=publisher, **opts)
    raise ValueError(f"Unknown terminator name: {name}")
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
"""Explainers are defined here."""
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
import polars as pl
|
|
5
|
+
import shap
|
|
6
|
+
from scipy.spatial import cKDTree
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class ShapExplainer:
    """Defines a SHAP explainer for reference point based methods."""

    def __init__(self, problem_data: pl.DataFrame, input_symbols: list[str], output_symbols: list[str]):
        """Initialize the explainer.

        Initializes the explainer with given data, and input and output symbols.
        The data should contain the columns listed in the input and output symbols.
        This data is then used to simulate the inputs and outputs of an (interactive)
        multiobjective optimization method, which is used to explain the relation of its
        inputs and outputs using SHAP values.

        Note:
            The `data` for a reference point based method can be generated by, e.g.,
            randomly sampling the input space and then evaluating the method with the
            sampled inputs to generate outputs.

        Args:
            problem_data (pl.DataFrame): the data to simulate the input and
                outputs of a multiobjective optimization method.
            input_symbols (list[str]): the input symbols present in `data`.
                These symbols represent the inputs to the method.
            output_symbols (list[str]): the output symbols present in `data`.
                These symbols represent the outputs of the method.
        """
        self.data = problem_data
        self.input_symbols = input_symbols
        self.output_symbols = output_symbols
        # Cache numpy views of the input/output columns for fast nearest-neighbor lookup.
        self.input_array = self.data[self.input_symbols].to_numpy()
        self.output_array = self.data[self.output_symbols].to_numpy()
        # KD-tree over the inputs: maps an arbitrary query point to its closest known input row.
        self.to_output_tree = cKDTree(self.input_array)
        # Set in `setup`; `explain_input` requires it to be initialized first.
        self.explainer = None

    def setup(self, background_data: pl.DataFrame):
        """Setup the explainer.

        Setups the SHAP explainer with the given background data. The
        background data should have the columns `self.input_symbols`. The
        background data is used as the background (or missing data) when
        computing SHAP values. The mean (or expected values) of the background
        data's output (`self.output_symbols`) will determine the baseline of the
        SHAP values.

        Note:
            To generate a dataset with meaningful expected values, e.g., in case
            the SHAP values are better understood by relating them to a specific baseline,
            see `desdeo.explanations.generate_biased_mean_data`.

        Args:
            background_data (pl.DataFrame): the background data.
        """
        self.explainer = shap.Explainer(
            self.evaluate,
            masker=background_data[self.input_symbols].to_numpy(),
        )

    def evaluate(self, evaluate_array: np.ndarray) -> np.ndarray:
        """Evaluates the multiobjective optimization method represented by the data.

        Note:
            Evaluation happens by finding the closest matching input array in the
            `self.input_array` and then using that value's corresponding output
            as the evaluation result. Closest means lowest Euclidean distance.

        Args:
            evaluate_array (np.ndarray): the inputs to the method represented by the data.
                Can be either a single input, or an array of multiple inputs. Used mainly by
                `self.explain_input`.

        Returns:
            np.ndarray: the evaluated output(s) corresponding to the input data.
        """
        _, indices = self.to_output_tree.query(evaluate_array)

        return self.output_array[indices]

    def explain_input(self, to_be_explained: pl.DataFrame) -> dict:
        """Explain an input and produces SHAP values.

        Args:
            to_be_explained (pl.DataFrame): the input to be explained. The
                dataframe must have the columns defined in `self.input_symbols`.

        Raises:
            RuntimeError: if `setup` has not been called before this method.

        Returns:
            dict: the key 'shaps' corresponds to the computed SHAP values for
                the input, the key 'base_values' is the baseline the SHAP values
                were computed against, and the key 'data' is the input the SHAP
                values were computed for.
        """
        # Fail with a clear message instead of "'NoneType' object is not callable".
        if self.explainer is None:
            raise RuntimeError("The explainer has not been set up. Call `setup` before `explain_input`.")

        _to_be_explained = to_be_explained[self.input_symbols].to_numpy()

        explanation = self.explainer(_to_be_explained)

        # Convert the shap.Explanation object to the dict format promised by
        # the return annotation and docstring.
        return {
            "shaps": explanation.values,
            "base_values": explanation.base_values,
            "data": explanation.data,
        }
|