desdeo 1.1.3__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. desdeo/__init__.py +8 -8
  2. desdeo/api/README.md +73 -0
  3. desdeo/api/__init__.py +15 -0
  4. desdeo/api/app.py +40 -0
  5. desdeo/api/config.py +69 -0
  6. desdeo/api/config.toml +53 -0
  7. desdeo/api/db.py +25 -0
  8. desdeo/api/db_init.py +79 -0
  9. desdeo/api/db_models.py +164 -0
  10. desdeo/api/malaga_db_init.py +27 -0
  11. desdeo/api/models/__init__.py +66 -0
  12. desdeo/api/models/archive.py +34 -0
  13. desdeo/api/models/preference.py +90 -0
  14. desdeo/api/models/problem.py +507 -0
  15. desdeo/api/models/reference_point_method.py +18 -0
  16. desdeo/api/models/session.py +46 -0
  17. desdeo/api/models/state.py +96 -0
  18. desdeo/api/models/user.py +51 -0
  19. desdeo/api/routers/_NAUTILUS.py +245 -0
  20. desdeo/api/routers/_NAUTILUS_navigator.py +233 -0
  21. desdeo/api/routers/_NIMBUS.py +762 -0
  22. desdeo/api/routers/__init__.py +5 -0
  23. desdeo/api/routers/problem.py +110 -0
  24. desdeo/api/routers/reference_point_method.py +117 -0
  25. desdeo/api/routers/session.py +76 -0
  26. desdeo/api/routers/test.py +16 -0
  27. desdeo/api/routers/user_authentication.py +366 -0
  28. desdeo/api/schema.py +94 -0
  29. desdeo/api/tests/__init__.py +0 -0
  30. desdeo/api/tests/conftest.py +59 -0
  31. desdeo/api/tests/test_models.py +701 -0
  32. desdeo/api/tests/test_routes.py +216 -0
  33. desdeo/api/utils/database.py +274 -0
  34. desdeo/api/utils/logger.py +29 -0
  35. desdeo/core.py +27 -0
  36. desdeo/emo/__init__.py +29 -0
  37. desdeo/emo/hooks/archivers.py +172 -0
  38. desdeo/emo/methods/EAs.py +418 -0
  39. desdeo/emo/methods/__init__.py +0 -0
  40. desdeo/emo/methods/bases.py +59 -0
  41. desdeo/emo/operators/__init__.py +1 -0
  42. desdeo/emo/operators/crossover.py +780 -0
  43. desdeo/emo/operators/evaluator.py +118 -0
  44. desdeo/emo/operators/generator.py +356 -0
  45. desdeo/emo/operators/mutation.py +1053 -0
  46. desdeo/emo/operators/selection.py +1036 -0
  47. desdeo/emo/operators/termination.py +178 -0
  48. desdeo/explanations/__init__.py +6 -0
  49. desdeo/explanations/explainer.py +100 -0
  50. desdeo/explanations/utils.py +90 -0
  51. desdeo/mcdm/__init__.py +19 -0
  52. desdeo/mcdm/nautili.py +345 -0
  53. desdeo/mcdm/nautilus.py +477 -0
  54. desdeo/mcdm/nautilus_navigator.py +655 -0
  55. desdeo/mcdm/nimbus.py +417 -0
  56. desdeo/mcdm/pareto_navigator.py +269 -0
  57. desdeo/mcdm/reference_point_method.py +116 -0
  58. desdeo/problem/__init__.py +79 -0
  59. desdeo/problem/evaluator.py +561 -0
  60. desdeo/problem/gurobipy_evaluator.py +562 -0
  61. desdeo/problem/infix_parser.py +341 -0
  62. desdeo/problem/json_parser.py +944 -0
  63. desdeo/problem/pyomo_evaluator.py +468 -0
  64. desdeo/problem/schema.py +1808 -0
  65. desdeo/problem/simulator_evaluator.py +298 -0
  66. desdeo/problem/sympy_evaluator.py +244 -0
  67. desdeo/problem/testproblems/__init__.py +73 -0
  68. desdeo/problem/testproblems/binh_and_korn_problem.py +88 -0
  69. desdeo/problem/testproblems/dtlz2_problem.py +102 -0
  70. desdeo/problem/testproblems/forest_problem.py +275 -0
  71. desdeo/problem/testproblems/knapsack_problem.py +163 -0
  72. desdeo/problem/testproblems/mcwb_problem.py +831 -0
  73. desdeo/problem/testproblems/mixed_variable_dimenrions_problem.py +83 -0
  74. desdeo/problem/testproblems/momip_problem.py +172 -0
  75. desdeo/problem/testproblems/nimbus_problem.py +143 -0
  76. desdeo/problem/testproblems/pareto_navigator_problem.py +89 -0
  77. desdeo/problem/testproblems/re_problem.py +492 -0
  78. desdeo/problem/testproblems/river_pollution_problem.py +434 -0
  79. desdeo/problem/testproblems/rocket_injector_design_problem.py +140 -0
  80. desdeo/problem/testproblems/simple_problem.py +351 -0
  81. desdeo/problem/testproblems/simulator_problem.py +92 -0
  82. desdeo/problem/testproblems/spanish_sustainability_problem.py +945 -0
  83. desdeo/problem/testproblems/zdt_problem.py +271 -0
  84. desdeo/problem/utils.py +245 -0
  85. desdeo/tools/GenerateReferencePoints.py +181 -0
  86. desdeo/tools/__init__.py +102 -0
  87. desdeo/tools/generics.py +145 -0
  88. desdeo/tools/gurobipy_solver_interfaces.py +258 -0
  89. desdeo/tools/indicators_binary.py +11 -0
  90. desdeo/tools/indicators_unary.py +375 -0
  91. desdeo/tools/interaction_schema.py +38 -0
  92. desdeo/tools/intersection.py +54 -0
  93. desdeo/tools/iterative_pareto_representer.py +99 -0
  94. desdeo/tools/message.py +234 -0
  95. desdeo/tools/ng_solver_interfaces.py +199 -0
  96. desdeo/tools/non_dominated_sorting.py +133 -0
  97. desdeo/tools/patterns.py +281 -0
  98. desdeo/tools/proximal_solver.py +99 -0
  99. desdeo/tools/pyomo_solver_interfaces.py +464 -0
  100. desdeo/tools/reference_vectors.py +462 -0
  101. desdeo/tools/scalarization.py +3138 -0
  102. desdeo/tools/scipy_solver_interfaces.py +454 -0
  103. desdeo/tools/score_bands.py +464 -0
  104. desdeo/tools/utils.py +320 -0
  105. desdeo/utopia_stuff/__init__.py +0 -0
  106. desdeo/utopia_stuff/data/1.json +15 -0
  107. desdeo/utopia_stuff/data/2.json +13 -0
  108. desdeo/utopia_stuff/data/3.json +15 -0
  109. desdeo/utopia_stuff/data/4.json +17 -0
  110. desdeo/utopia_stuff/data/5.json +15 -0
  111. desdeo/utopia_stuff/from_json.py +40 -0
  112. desdeo/utopia_stuff/reinit_user.py +38 -0
  113. desdeo/utopia_stuff/utopia_db_init.py +212 -0
  114. desdeo/utopia_stuff/utopia_problem.py +403 -0
  115. desdeo/utopia_stuff/utopia_problem_old.py +415 -0
  116. desdeo/utopia_stuff/utopia_reference_solutions.py +79 -0
  117. desdeo-2.0.0.dist-info/LICENSE +21 -0
  118. desdeo-2.0.0.dist-info/METADATA +168 -0
  119. desdeo-2.0.0.dist-info/RECORD +120 -0
  120. {desdeo-1.1.3.dist-info → desdeo-2.0.0.dist-info}/WHEEL +1 -1
  121. desdeo-1.1.3.dist-info/METADATA +0 -18
  122. desdeo-1.1.3.dist-info/RECORD +0 -4
desdeo/tools/indicators_unary.py
@@ -0,0 +1,375 @@
+ """This module implements unary indicators that can be used to evaluate the quality of a single solution set.
+
+ It assumes that the solution set has been normalized such that _some_ ideal point (not necessarily the ideal point
+ of the set) is the origin and _some_ nadir point (not necessarily the nadir point of the set) is (1, 1, ..., 1).
+ The normalized solution set is assumed to be inside the bounding box [0, 1]^k, where k is the number of objectives.
+ If these conditions are not met, the results of the indicators will not be meaningful.
+
+ Additionally, the set may be assumed to only contain mutually non-dominated solutions, depending on the indicator.
+
+ For now, we rely on pymoo for the implementation of some of the indicators.
+
+ Find more information about the indicators in:
+ Audet, Charles, et al. "Performance indicators in multiobjective optimization."
+ European Journal of Operational Research 292.2 (2021): 397-422.
+ """
+
+ from warnings import warn
+
+ import numpy as np
+ from pydantic import BaseModel, Field
+ from pymoo.indicators.hv import Hypervolume
+ from pymoo.indicators.rmetric import RMetric
+ from scipy.spatial.distance import cdist
+
+
+ def hv(solution_set: np.ndarray, reference_point_component: float) -> float:
+     """Calculate the hypervolume indicator for a set of solutions.
+
+     Args:
+         solution_set (np.ndarray): A 2D numpy array where each row is a solution and each column is an objective
+             value. The solutions are assumed to be non-dominated and normalized within the unit hypercube. The
+             ideal and nadir of the set itself can lie within the hypercube, but not outside it.
+         reference_point_component (float): The value of the reference point component. The reference point is
+             assumed to be the same for all objectives. The reference point must be at least 1.
+
+     Returns:
+         float: The hypervolume indicator value.
+     """
+     rp = np.full(solution_set.shape[1], reference_point_component, dtype=np.float64)
+     ideal = np.zeros(solution_set.shape[1], dtype=np.float64)
+     nadir = np.ones(solution_set.shape[1], dtype=np.float64)
+
+     # Sets the ideal and nadir to (0, 0, ..., 0) and (1, 1, ..., 1) respectively.
+     # Turns off non-domination checks.
+     # Turns off normalization of the reference point.
+     hv_metric = Hypervolume(ref_point=rp, ideal=ideal, nadir=nadir, nds=False, norm_ref_point=False)
+
+     ind = hv_metric(solution_set)
+
+     if ind is None:
+         raise ValueError("Hypervolume calculation failed.")
+
+     return float(ind)
+
+
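+ # A minimal usage sketch (the array values are illustrative): a normalized,
+ # mutually non-dominated set evaluated against the reference point (1.1, 1.1).
+ #
+ #     front = np.array([[0.2, 0.8], [0.5, 0.5], [0.8, 0.2]])
+ #     value = hv(front, 1.1)
+ #
+ # A larger reference point component inflates all values alike, so its choice
+ # matters mainly when comparing different sets against each other.
+
+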
+ def hv_batch(
+     solution_sets: dict[str, np.ndarray], reference_points_component: list[float]
+ ) -> dict[str, list[float | None]]:
+     """Calculate the hypervolume indicator for sets of solutions over a range of reference points.
+
+     Args:
+         solution_sets (dict[str, np.ndarray]): A dict of strings mapped to 2D numpy arrays, where each array
+             contains a set of solutions. Each row is a solution and each column is an objective value. The
+             solutions are assumed to be non-dominated within their respective sets and normalized within the
+             unit hypercube. The ideal and nadir of each set can lie within the hypercube, but not outside it.
+             The sets must have the same number of objectives/columns but can have a different number of
+             solutions/rows. The keys of the dict are the names of the sets.
+         reference_points_component (list[float]): A list of reference point component values. The hypervolume is
+             calculated for each set of solutions for each reference point component. The reference point is
+             assumed to be the same for all objectives. The reference point must be at least 1.
+
+     Returns:
+         dict[str, list[float | None]]: A dict of strings mapped to lists of hypervolume indicator values. The
+             keys of the dict are the names of the sets. The lists contain the hypervolume indicator values for
+             each reference point component. If a calculation fails, the value is set to None and should be
+             handled by the user.
+     """
+     hvs = {key: [] for key in solution_sets}
+     num_objs = solution_sets[next(iter(solution_sets))].shape[1]
+
+     for rp in reference_points_component:
+         hv_metric = Hypervolume(
+             ref_point=np.full(num_objs, rp, dtype=np.float64),
+             ideal=np.zeros(num_objs, dtype=np.float64),
+             nadir=np.ones(num_objs, dtype=np.float64),
+             nds=False,
+             norm_ref_point=False,
+         )
+         for set_name in solution_sets:
+             ind = hv_metric(solution_sets[set_name])
+             if ind is None:
+                 warn("Hypervolume calculation failed. Setting value to None", category=RuntimeWarning, stacklevel=2)
+                 hvs[set_name].append(None)
+             else:
+                 hvs[set_name].append(float(ind))
+
+     return hvs
+
+
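+ # Sketch of the batch form (the set names and arrays are illustrative):
+ #
+ #     fronts = {"nsga2": front_a, "rvea": front_b}
+ #     hv_batch(fronts, reference_points_component=[1.0, 1.1, 2.0])
+ #     # -> {"nsga2": [hv at 1.0, hv at 1.1, hv at 2.0], "rvea": [...]}
+
+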
+ class DistanceIndicators(BaseModel):
+     """A container for closely related distance-based indicators."""
+
+     igd: float = Field(description="The inverted generational distance indicator value.")
+     "The inverted generational distance indicator value."
+     igd_p: float = Field(
+         description=(
+             "The inverted generational distance indicator, where instead of taking the arithmetic "
+             "mean of the distances, we take the geometric mean."
+         )
+     )
+     "The inverted generational distance indicator, where instead of taking the arithmetic mean of the distances,"
+     " we take the geometric mean."
+     gd: float = Field(description="The generational distance indicator value.")
+     "The generational distance indicator value."
+     gd_p: float = Field(
+         description=(
+             "The generational distance indicator, where instead of taking the arithmetic mean of the "
+             "distances, we take the geometric mean."
+         )
+     )
+     "The generational distance indicator, where instead of taking the arithmetic mean of the distances,"
+     " we take the geometric mean."
+     ahd: float = Field(description="The average Hausdorff distance indicator value.")
+     "The average Hausdorff distance indicator value."
+
+
+ def distance_indicators(solution_set: np.ndarray, reference_set: np.ndarray, p: float = 2.0) -> DistanceIndicators:
+     """Calculates various distance-based indicators between a solution set and a reference set.
+
+     Args:
+         solution_set (np.ndarray): A 2D numpy array where each row is a solution and each column is an objective
+             value. The solutions are assumed to be non-dominated and normalized within the unit hypercube. The
+             ideal and nadir of the set itself can lie within the hypercube, but not outside it.
+         reference_set (np.ndarray): A 2D numpy array where each row is a solution and each column is an objective
+             value. The solutions are assumed to be normalized within the unit hypercube. The ideal and nadir of
+             the reference set should probably be (0, 0, ..., 0) and (1, 1, ..., 1) respectively. The reference
+             set is assumed to be non-dominated.
+         p (float, optional): The power of the Minkowski metric. Set to 1 for Manhattan distance, 2 for Euclidean
+             distance, and np.inf (or math.inf) for Chebyshev distance. Defaults to 2.0.
+
+     Returns:
+         DistanceIndicators: A Pydantic class containing the IGD, IGD_p, GD, GD_p, and AHD indicator values.
+     """
+     distance_matrix = cdist(solution_set, reference_set, metric="minkowski", p=p)
+     _igd = np.min(distance_matrix, axis=0).mean()
+     _gd = np.min(distance_matrix, axis=1).mean()
+     ref_size = reference_set.shape[0]
+     set_size = solution_set.shape[0]
+
+     _igd_p = (_igd * ref_size) / (ref_size ** (1 / p))
+     _gd_p = (_gd * set_size) / (set_size ** (1 / p))
+     _ahd = max(_igd_p, _gd_p)
+     return DistanceIndicators(igd=_igd, igd_p=_igd_p, gd=_gd, gd_p=_gd_p, ahd=_ahd)
+
+
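+ # Sketch (illustrative arrays): an approximation set compared against a sampled
+ # reference front; AHD is the larger of the two modified distances.
+ #
+ #     ref = np.array([[0.0, 1.0], [0.5, 0.5], [1.0, 0.0]])
+ #     approx = np.array([[0.1, 0.95], [0.6, 0.6]])
+ #     distance_indicators(approx, ref, p=2.0).ahd
+
+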
+ def distance_indicators_batch(
+     solution_sets: dict[str, np.ndarray], reference_set: np.ndarray, p: float = 2.0
+ ) -> dict[str, DistanceIndicators]:
+     """Calculate the IGD, GD, IGD_p, GD_p, and AHD for sets of solutions.
+
+     Args:
+         solution_sets (dict[str, np.ndarray]): A dict of strings mapped to 2D numpy arrays, where each array
+             contains a set of solutions. Each row is a solution and each column is an objective value. The
+             solutions are assumed to be normalized within the unit hypercube. The ideal and nadir of each set
+             can lie within the hypercube, but not outside it. The solutions are assumed to be non-dominated
+             within their respective sets. The sets must have the same number of objectives/columns but can have
+             a different number of solutions/rows. The keys of the dict are the names of the sets.
+         reference_set (np.ndarray): A 2D numpy array where each row is a solution and each column is an objective
+             value. The solutions are assumed to be normalized within the unit hypercube. The ideal and nadir of
+             the reference set should probably be (0, 0, ..., 0) and (1, 1, ..., 1) respectively. The reference
+             set is assumed to be non-dominated.
+         p (float, optional): The power of the Minkowski metric. Set to 1 for Manhattan distance, 2 for Euclidean
+             distance, and np.inf (or math.inf) for Chebyshev distance. Defaults to 2.0.
+
+     Returns:
+         dict[str, DistanceIndicators]: A dict of strings mapped to DistanceIndicators objects. The keys of the
+             dict are the names of the sets. The DistanceIndicators objects contain the IGD, IGD_p, GD, GD_p, and
+             AHD indicator values. This data structure can be easily converted to a DataFrame or saved to disk as
+             a JSON file.
+     """
+     inds = {}
+     for set_name in solution_sets:
+         inds[set_name] = distance_indicators(solution_sets[set_name], reference_set, p=p)
+     return inds
+
+
+ class IGDPlusIndicators(BaseModel):
+     """A container for the IGD+ distance-based indicator."""
+
+     igd_plus: float = Field(description="The modified inverted generational distance (IGD+) indicator value.")
+
+
+ def igd_plus_indicator(solution_set: np.ndarray, reference_set: np.ndarray, p: float = 2.0) -> IGDPlusIndicators:
+     """Computes the IGD+ indicator for a given solution set.
+
+     Notes:
+         The minimization of the objective function values is assumed.
+
+     Args:
+         solution_set (np.ndarray): The solution set being evaluated.
+         reference_set (np.ndarray): The reference Pareto front.
+         p (float, optional): The power of the Minkowski metric. Defaults to 2.0 (Euclidean distance).
+
+     Returns:
+         IGDPlusIndicators: A Pydantic class containing the IGD+ indicator value.
+     """
+     num_ref_points = reference_set.shape[0]
+     total_distance = 0.0
+
+     for y_p in reference_set:
+         min_distance = float("inf")
+
+         for y_n in solution_set:
+             # Compute IGD+ distance (only positive differences)
+             distance = np.sum(np.maximum(0, y_n - y_p) ** p)  # Sum over objectives
+             min_distance = min(min_distance, distance)  # Store the closest one
+
+         total_distance += min_distance ** (1 / p)  # Apply the root AFTER summing over objectives
+
+     igd_plus_value = total_distance / num_ref_points
+     return IGDPlusIndicators(igd_plus=igd_plus_value)
+
+
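+ # A vectorized sketch of the same computation (equivalent up to floating point
+ # rounding), useful when both sets are large:
+ #
+ #     d = np.maximum(0.0, solution_set[None, :, :] - reference_set[:, None, :])
+ #     igd_plus = (np.sum(d**p, axis=2).min(axis=1) ** (1 / p)).mean()
+
+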
+ def igd_plus_batch(
+     solution_sets: dict[str, np.ndarray], reference_set: np.ndarray, p: float = 2.0
+ ) -> dict[str, IGDPlusIndicators]:
+     """Computes the IGD+ indicator for multiple solution sets.
+
+     Notes:
+         The minimization of the objective function values is assumed.
+
+     Args:
+         solution_sets (dict[str, np.ndarray]): A dictionary of solution sets.
+         reference_set (np.ndarray): The reference Pareto front.
+         p (float, optional): The power of the Minkowski metric. Defaults to 2.0 (Euclidean distance).
+
+     Returns:
+         dict[str, IGDPlusIndicators]: A dictionary of IGDPlusIndicators.
+     """
+     results = {}
+     for set_name, solution_set in solution_sets.items():
+         results[set_name] = igd_plus_indicator(solution_set, reference_set, p)
+     return results
+
+
+ class R2Indicator(BaseModel):
+     """A container for the unary R2 indicator value."""
+
+     r2_value: float = Field(description="The unary R2 indicator value.")
+
+
+ def tchebycheff_utility(fx: np.ndarray, lambd: np.ndarray, z_star: np.ndarray, rho: float = 0.05) -> float:
+     """Calculates the augmented Tchebycheff utility of a solution."""
+     diff = np.abs(z_star - fx)
+     max_term = np.max(lambd * diff)
+     sum_term = np.sum(diff)
+     return -(max_term + rho * sum_term)
+
+
+ def r2_indicator(
+     solution_set: np.ndarray,
+     lambda_set: np.ndarray,
+     z_star: np.ndarray,
+     rho: float = 0.05,
+ ) -> R2Indicator:
+     """Computes the unary R2 indicator for a given solution set.
+
+     Args:
+         solution_set (np.ndarray): The Pareto front approximation.
+         lambda_set (np.ndarray): The set of normalized weight vectors (λ).
+         z_star (np.ndarray): The ideal point (must dominate or weakly dominate all solutions).
+         rho (float, optional): Small positive number for the augmented Tchebycheff function. Defaults to 0.05.
+
+     Returns:
+         R2Indicator: A Pydantic class with the R2 value.
+     """
+     total_score = 0.0
+     for lambd in lambda_set:
+         # For each weight vector, take the best (largest) utility over the set.
+         best_score = max(tchebycheff_utility(fx, lambd, z_star, rho) for fx in solution_set)
+         total_score += best_score
+
+     r2_value = total_score / len(lambda_set)
+     return R2Indicator(r2_value=r2_value)
+
+
+ def r2_batch(
+     solution_sets: dict[str, np.ndarray],
+     lambda_set: np.ndarray,
+     z_star: np.ndarray,
+     rho: float = 0.05,
+ ) -> dict[str, R2Indicator]:
+     """Computes the R2 indicator for multiple solution sets.
+
+     Args:
+         solution_sets (dict[str, np.ndarray]): Dictionary of solution sets.
+         lambda_set (np.ndarray): Set of weight vectors.
+         z_star (np.ndarray): Ideal point.
+         rho (float, optional): Augmented Tchebycheff parameter. Defaults to 0.05.
+
+     Returns:
+         dict[str, R2Indicator]: Dictionary of results.
+     """
+     return {
+         name: r2_indicator(solution_set, lambda_set, z_star, rho)
+         for name, solution_set in solution_sets.items()
+     }
+
+
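+ # Sketch (illustrative weights): because the utilities above are non-positive,
+ # the resulting R2 value is non-positive as well, and values closer to zero
+ # indicate a better set.
+ #
+ #     weights = np.array([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]])
+ #     r2_indicator(front, weights, z_star=np.zeros(2)).r2_value
+
+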
+ class RMetricIndicators(BaseModel):
+     """A container for R-metric indicators: R-HV and R-IGD."""
+
+     r_hv: float = Field(description="The R-HV indicator value, based on hypervolume.")
+     "The R-HV indicator value, based on hypervolume."
+     r_igd: float = Field(description="The R-IGD indicator value, based on inverted generational distance.")
+     "The R-IGD indicator value, based on inverted generational distance."
+
+
+ def r_metric_indicator(
+     solution_set: np.ndarray, ref_points: np.ndarray, w: np.ndarray | None = None, delta: float = 0.2
+ ) -> RMetricIndicators:
+     """Calculate the R-metrics (R-HV and R-IGD) for a given solution set.
+
+     Args:
+         solution_set (np.ndarray): The set of solutions.
+         ref_points (np.ndarray): A set of reference points.
+         w (np.ndarray, optional): Weights for each objective. Defaults to None.
+         delta (float, optional): The size of the region of interest for the metric calculation. Defaults to 0.2.
+
+     Returns:
+         RMetricIndicators: An object containing the computed R-HV and R-IGD values.
+     """
+     # Calculate the Pareto front of the given set.
+     pareto_front = get_pareto_front(solution_set)
+
+     rmetric = RMetric(problem=None, ref_points=ref_points, w=w, delta=delta, pf=pareto_front)
+     r_igd, r_hv = rmetric.do(solution_set)
+     return RMetricIndicators(r_hv=r_hv, r_igd=r_igd)
+
+
+ def r_metric_indicators_batch(
+     solution_sets: dict[str, np.ndarray], ref_points: np.ndarray, w: np.ndarray | None = None, delta: float = 0.2
+ ) -> dict[str, RMetricIndicators]:
+     """Calculate the R-metrics (R-HV and R-IGD) for a batch of solution sets."""
+     inds = {}
+     for set_name in solution_sets:
+         inds[set_name] = r_metric_indicator(solution_sets[set_name], ref_points, w, delta)
+     return inds
+
+
+ def is_dominated(solution: np.ndarray, other_solutions: np.ndarray) -> bool:
+     """Check if a solution is dominated by any other solution."""
+     return any(np.all(other <= solution) and np.any(other < solution) for other in other_solutions)
+
+
+ def get_pareto_front(solutions: np.ndarray) -> np.ndarray:
+     """Extract the Pareto front from a set of solutions."""
+     pareto_front = []
+     for i, solution in enumerate(solutions):
+         remaining_solutions = np.delete(solutions, i, axis=0)
+         if not is_dominated(solution, remaining_solutions):
+             pareto_front.append(solution)
+     return np.array(pareto_front)
+
+
+ # Additional unary indicators can be added here, e.g. other indicators surveyed in Audet et al. (2021).
+ # The function signature should be similar to the already implemented functions, if reasonable.
+ # Optionally, a batch version of the indicator can be added as well.
+ # The methods should make similar assumptions about the input data as the already implemented functions.
desdeo/tools/interaction_schema.py
@@ -0,0 +1,38 @@
+ """The schema to represent the interactions of the user."""
+
+ from pydantic import BaseModel, Field
+
+
+ class Interaction(BaseModel):
+     """A tree-like structure to represent the interactions of the user."""
+
+     method_name: str = Field(description="The name of the method used for this iteration.")
+     preference_information: dict = Field(
+         description="The preference information given by the user for this iteration."
+     )
+     result: dict = Field(
+         description="The result of the iteration."
+     )  # In the database, this should be a foreign key to the result table
+     next_interaction: list["Interaction"] | None = Field(
+         default=None,
+         description=(
+             "The next interaction in the tree. This is a list of 'interactions' as the user can choose to go"
+             " back to a previous iteration and change the preference information. If the user chooses to go"
+             " back, just append the new interactions to the list in the order they were made."
+         ),
+     )  # In the database, this should be a foreign key to the interactions table
+
+
+ if __name__ == "__main__":
+     # An example of how to use the schema
+     inter = Interaction(
+         method_name="NIMBUS",
+         preference_information={"a": 1},
+         result={"foo": [0, 0, 1]},
+         next_interaction=[
+             Interaction(
+                 method_name="iRVEA",
+                 preference_information={"b": [1, 2]},
+                 result={"baz": [9, 1, 0]},
+                 next_interaction=None,
+             )
+         ],
+     )
+     print(inter.model_dump_json())
+     print()
+     print(Interaction.model_json_schema())
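+ # The example above serializes to roughly the following JSON (whitespace added
+ # for readability; the remaining fields of the nested interaction are elided):
+ #
+ #     {"method_name": "NIMBUS",
+ #      "preference_information": {"a": 1},
+ #      "result": {"foo": [0, 0, 1]},
+ #      "next_interaction": [{"method_name": "iRVEA", ...}]}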
desdeo/tools/intersection.py
@@ -0,0 +1,54 @@
+ """Utility methods to check if reference vectors intersect a bounding box."""
+
+ import numpy as np
+
+
+ def line_box_intersection(
+     box_min: np.ndarray, box_max: np.ndarray, reference_points: np.ndarray, thickness: float
+ ) -> np.ndarray:
+     """Find the reference directions that intersect the box defined by box_min and box_max.
+
+     Args:
+         box_min (np.ndarray): The infimum of the box.
+         box_max (np.ndarray): The supremum of the box.
+         reference_points (np.ndarray): The reference directions.
+         thickness (float): The thickness of the box. The intersection interval is widened by half the thickness
+             at both ends, which effectively enlarges the box. The reference directions that intersect the
+             enlarged box are marked as bad. The thickness is a hyperparameter that needs to be tuned; values
+             close to 0.05 are a reasonable starting point. Lower values will result in fewer reference points
+             being marked as bad. Note that a value of zero does not imply that only reference directions that
+             exactly intersect the box are marked as bad, since the interval bounds are compared with np.isclose.
+
+     Returns:
+         np.ndarray: A boolean array of length num_points, where True indicates that the reference direction
+             intersects the box.
+     """
+     # Line through the reference point: reference point + k * (nadir - ideal).
+     # Per dimension, the line is inside the box for k between these bounds.
+     k_bmin = box_min - reference_points
+     k_bmax = box_max - reference_points
+
+     # The line intersects the box if the latest entry (k_min) does not come after the earliest exit (k_max).
+     k_min = np.min((k_bmax, k_bmin), axis=0).max(axis=1) - thickness / 2
+     k_max = np.max((k_bmax, k_bmin), axis=0).min(axis=1) + thickness / 2
+
+     intersect_mask = np.logical_or((k_max >= k_min), np.isclose(k_max, k_min))
+     return intersect_mask
+
+
+ def find_bad_indicesREF(solution, ref_point, reference_points, thickness):
+     """Find the reference directions that intersect the box derived from a solution and its reference point."""
+     box_max, box_min = find_bad_limits(solution, ref_point)
+     bad_points = line_box_intersection(box_min, box_max, reference_points, thickness)
+     return bad_points, box_min, box_max
+
+
+ def find_bad_limits(solution, ref_point, threshold=0.05):
+     """Derive the box limits from a solution and the reference point used to find it."""
+     # Find the projection of the solution on the reference direction.
+     k = solution - ref_point
+     box_max = ref_point + k.max()
+     box_min = solution
+     return box_max, box_min - threshold / 2
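+ # Sketch (illustrative values, two objectives): reference points on the plane
+ # x + y = 2, tested against the box [0.4, 0.4]..[0.6, 0.6]:
+ #
+ #     rps = np.array([[1.5, 0.5], [0.5, 1.5], [1.0, 1.0]])
+ #     line_box_intersection(np.array([0.4, 0.4]), np.array([0.6, 0.6]), rps, 0.05)
+ #     # -> array([False, False, True]); only the middle direction crosses the box.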
desdeo/tools/iterative_pareto_representer.py
@@ -0,0 +1,99 @@
+ """Implements the Iterative Pareto Representer algorithm."""
+
+ import numpy as np
+ from pydantic import BaseModel, Field
+ from scipy.spatial.distance import cdist
+
+ from desdeo.tools.intersection import find_bad_indicesREF
+
+
+ class _EvaluatedPoint(BaseModel):
+     reference_point: dict[str, float] = Field(
+         description="""Reference point used to evaluate the point.
+         The objective space is assumed to be normalized such that the ideal point is (0, 0, ..., 0)
+         and the nadir point is (1, 1, ..., 1). Naturally, all objectives are assumed to be minimized.
+         The reference point must lie on a plane perpendicular to the ideal-nadir line, and passing through
+         the nadir point. The reference point must be used together with an ASF to find an exact Pareto
+         optimal solution."""
+     )
+     targets: dict[str, float] = Field(
+         description="""Target values for each objective function. The target values are used to evaluate the point.
+         These are the objective function values that have been scaled between the ideal and nadir points,
+         and assumed to be minimized."""
+     )
+     objectives: dict[str, float] = Field(
+         description="""The actual objective function values of the evaluated point. These values are not scaled.
+         Not required for the algorithm, but useful for archiving."""
+     )
+
+
+ def choose_reference_point(
+     refp_array: np.ndarray,
+     evaluated_points: list[_EvaluatedPoint] | None = None,
+ ) -> tuple[np.ndarray, np.ndarray | None]:
+     """Choose the next reference point to evaluate using the Iterative Pareto Representer algorithm.
+
+     Args:
+         refp_array (np.ndarray): The reference points to choose from.
+         evaluated_points (list[_EvaluatedPoint]): Already evaluated reference points and their targets.
+             If None, a random reference point is chosen.
+
+     Returns:
+         tuple[np.ndarray, np.ndarray | None]: The chosen reference point and a boolean mask of the pruned
+             ("bad") reference points; the mask is None when nothing has been evaluated yet.
+     """
+     if evaluated_points is None or len(evaluated_points) == 0:
+         return refp_array[np.random.choice(refp_array.shape[0])], None
+     bad_points_mask = _find_bad_RPs(refp_array, evaluated_points)
+     available_points_mask = ~bad_points_mask
+     solution_projections = _project(np.array([list(eval_result.targets.values()) for eval_result in evaluated_points]))
+     return _DSS_with_pruning(
+         available=refp_array[available_points_mask],
+         taken=np.vstack((solution_projections, refp_array[bad_points_mask])),
+     ), bad_points_mask
+
+
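+ # Sketch of the intended loop (the solver step is an assumption, not part of
+ # this module): pick a point, solve an ASF scalarization for it, record the
+ # result as an _EvaluatedPoint, and repeat.
+ #
+ #     evaluated: list[_EvaluatedPoint] = []
+ #     point, mask = choose_reference_point(refp_array)
+ #     for _ in range(budget):
+ #         ...solve the ASF for `point` and append an _EvaluatedPoint...
+ #         point, mask = choose_reference_point(refp_array, evaluated)
+
+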
+ def _find_bad_RPs(
+     reference_points_array: np.ndarray, eval_results: list[_EvaluatedPoint], thickness: float = 0.02
+ ) -> np.ndarray:
+     """Find the reference points that will lead to repeated evaluations according to the ASF pruning rule."""
+     mask = np.zeros(reference_points_array.shape[0], dtype=bool)
+
+     def dict_to_numpy(values: dict[str, float]) -> np.ndarray:
+         return np.array(list(values.values()))
+
+     for eval_result in eval_results:
+         bad_indices, _, _ = find_bad_indicesREF(
+             dict_to_numpy(eval_result.targets),
+             dict_to_numpy(eval_result.reference_point),
+             reference_points_array,
+             thickness,
+         )
+         # bad_indices is a boolean mask; accumulate it over all evaluated points.
+         mask |= bad_indices
+     return mask
+
+
+ def _DSS_with_pruning(
+     available: np.ndarray,
+     taken: np.ndarray | None,
+ ) -> np.ndarray:
+     """A short implementation of the DSS algorithm using scipy."""
+     assert len(available) > 0, "No reference points available."
+
+     assert np.allclose(
+         available.sum(axis=1), available.shape[1]
+     ), "Reference points must lie on plane perpendicular to ideal-nadir line."
+
+     # Check for an empty `taken` set before asserting anything about it.
+     if taken is None or len(taken) == 0:
+         return available[np.random.choice(available.shape[0])]
+
+     assert np.allclose(
+         taken.sum(axis=1), taken.shape[1]
+     ), "Reference points must lie on plane perpendicular to ideal-nadir line."
+
+     # Farthest-point rule: pick the available point whose Chebyshev distance
+     # to the nearest taken point is the largest.
+     distances = cdist(available, taken, metric="chebyshev").min(axis=1)
+
+     return available[np.argmax(distances)]
+
+
+ def _project(solutions: np.ndarray) -> np.ndarray:
+     """Project the solutions onto the plane through the nadir point that is perpendicular to the ideal-nadir line."""
+     reference_point = np.ones(solutions.shape[1])
+     normal = reference_point / np.linalg.norm(reference_point)
+     perp_dist = np.atleast_2d(np.inner(solutions - reference_point, normal)).T
+     projected_points = solutions - perp_dist * normal
+     return projected_points