desdeo-1.2-py3-none-any.whl → desdeo-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. desdeo/__init__.py +8 -8
  2. desdeo/api/README.md +73 -0
  3. desdeo/api/__init__.py +15 -0
  4. desdeo/api/app.py +40 -0
  5. desdeo/api/config.py +69 -0
  6. desdeo/api/config.toml +53 -0
  7. desdeo/api/db.py +25 -0
  8. desdeo/api/db_init.py +79 -0
  9. desdeo/api/db_models.py +164 -0
  10. desdeo/api/malaga_db_init.py +27 -0
  11. desdeo/api/models/__init__.py +66 -0
  12. desdeo/api/models/archive.py +34 -0
  13. desdeo/api/models/preference.py +90 -0
  14. desdeo/api/models/problem.py +507 -0
  15. desdeo/api/models/reference_point_method.py +18 -0
  16. desdeo/api/models/session.py +46 -0
  17. desdeo/api/models/state.py +96 -0
  18. desdeo/api/models/user.py +51 -0
  19. desdeo/api/routers/_NAUTILUS.py +245 -0
  20. desdeo/api/routers/_NAUTILUS_navigator.py +233 -0
  21. desdeo/api/routers/_NIMBUS.py +762 -0
  22. desdeo/api/routers/__init__.py +5 -0
  23. desdeo/api/routers/problem.py +110 -0
  24. desdeo/api/routers/reference_point_method.py +117 -0
  25. desdeo/api/routers/session.py +76 -0
  26. desdeo/api/routers/test.py +16 -0
  27. desdeo/api/routers/user_authentication.py +366 -0
  28. desdeo/api/schema.py +94 -0
  29. desdeo/api/tests/__init__.py +0 -0
  30. desdeo/api/tests/conftest.py +59 -0
  31. desdeo/api/tests/test_models.py +701 -0
  32. desdeo/api/tests/test_routes.py +216 -0
  33. desdeo/api/utils/database.py +274 -0
  34. desdeo/api/utils/logger.py +29 -0
  35. desdeo/core.py +27 -0
  36. desdeo/emo/__init__.py +29 -0
  37. desdeo/emo/hooks/archivers.py +172 -0
  38. desdeo/emo/methods/EAs.py +418 -0
  39. desdeo/emo/methods/__init__.py +0 -0
  40. desdeo/emo/methods/bases.py +59 -0
  41. desdeo/emo/operators/__init__.py +1 -0
  42. desdeo/emo/operators/crossover.py +780 -0
  43. desdeo/emo/operators/evaluator.py +118 -0
  44. desdeo/emo/operators/generator.py +356 -0
  45. desdeo/emo/operators/mutation.py +1053 -0
  46. desdeo/emo/operators/selection.py +1036 -0
  47. desdeo/emo/operators/termination.py +178 -0
  48. desdeo/explanations/__init__.py +6 -0
  49. desdeo/explanations/explainer.py +100 -0
  50. desdeo/explanations/utils.py +90 -0
  51. desdeo/mcdm/__init__.py +19 -0
  52. desdeo/mcdm/nautili.py +345 -0
  53. desdeo/mcdm/nautilus.py +477 -0
  54. desdeo/mcdm/nautilus_navigator.py +655 -0
  55. desdeo/mcdm/nimbus.py +417 -0
  56. desdeo/mcdm/pareto_navigator.py +269 -0
  57. desdeo/mcdm/reference_point_method.py +116 -0
  58. desdeo/problem/__init__.py +79 -0
  59. desdeo/problem/evaluator.py +561 -0
  60. desdeo/problem/gurobipy_evaluator.py +562 -0
  61. desdeo/problem/infix_parser.py +341 -0
  62. desdeo/problem/json_parser.py +944 -0
  63. desdeo/problem/pyomo_evaluator.py +468 -0
  64. desdeo/problem/schema.py +1808 -0
  65. desdeo/problem/simulator_evaluator.py +298 -0
  66. desdeo/problem/sympy_evaluator.py +244 -0
  67. desdeo/problem/testproblems/__init__.py +73 -0
  68. desdeo/problem/testproblems/binh_and_korn_problem.py +88 -0
  69. desdeo/problem/testproblems/dtlz2_problem.py +102 -0
  70. desdeo/problem/testproblems/forest_problem.py +275 -0
  71. desdeo/problem/testproblems/knapsack_problem.py +163 -0
  72. desdeo/problem/testproblems/mcwb_problem.py +831 -0
  73. desdeo/problem/testproblems/mixed_variable_dimenrions_problem.py +83 -0
  74. desdeo/problem/testproblems/momip_problem.py +172 -0
  75. desdeo/problem/testproblems/nimbus_problem.py +143 -0
  76. desdeo/problem/testproblems/pareto_navigator_problem.py +89 -0
  77. desdeo/problem/testproblems/re_problem.py +492 -0
  78. desdeo/problem/testproblems/river_pollution_problem.py +434 -0
  79. desdeo/problem/testproblems/rocket_injector_design_problem.py +140 -0
  80. desdeo/problem/testproblems/simple_problem.py +351 -0
  81. desdeo/problem/testproblems/simulator_problem.py +92 -0
  82. desdeo/problem/testproblems/spanish_sustainability_problem.py +945 -0
  83. desdeo/problem/testproblems/zdt_problem.py +271 -0
  84. desdeo/problem/utils.py +245 -0
  85. desdeo/tools/GenerateReferencePoints.py +181 -0
  86. desdeo/tools/__init__.py +102 -0
  87. desdeo/tools/generics.py +145 -0
  88. desdeo/tools/gurobipy_solver_interfaces.py +258 -0
  89. desdeo/tools/indicators_binary.py +11 -0
  90. desdeo/tools/indicators_unary.py +375 -0
  91. desdeo/tools/interaction_schema.py +38 -0
  92. desdeo/tools/intersection.py +54 -0
  93. desdeo/tools/iterative_pareto_representer.py +99 -0
  94. desdeo/tools/message.py +234 -0
  95. desdeo/tools/ng_solver_interfaces.py +199 -0
  96. desdeo/tools/non_dominated_sorting.py +133 -0
  97. desdeo/tools/patterns.py +281 -0
  98. desdeo/tools/proximal_solver.py +99 -0
  99. desdeo/tools/pyomo_solver_interfaces.py +464 -0
  100. desdeo/tools/reference_vectors.py +462 -0
  101. desdeo/tools/scalarization.py +3138 -0
  102. desdeo/tools/scipy_solver_interfaces.py +454 -0
  103. desdeo/tools/score_bands.py +464 -0
  104. desdeo/tools/utils.py +320 -0
  105. desdeo/utopia_stuff/__init__.py +0 -0
  106. desdeo/utopia_stuff/data/1.json +15 -0
  107. desdeo/utopia_stuff/data/2.json +13 -0
  108. desdeo/utopia_stuff/data/3.json +15 -0
  109. desdeo/utopia_stuff/data/4.json +17 -0
  110. desdeo/utopia_stuff/data/5.json +15 -0
  111. desdeo/utopia_stuff/from_json.py +40 -0
  112. desdeo/utopia_stuff/reinit_user.py +38 -0
  113. desdeo/utopia_stuff/utopia_db_init.py +212 -0
  114. desdeo/utopia_stuff/utopia_problem.py +403 -0
  115. desdeo/utopia_stuff/utopia_problem_old.py +415 -0
  116. desdeo/utopia_stuff/utopia_reference_solutions.py +79 -0
  117. desdeo-2.0.0.dist-info/LICENSE +21 -0
  118. desdeo-2.0.0.dist-info/METADATA +168 -0
  119. desdeo-2.0.0.dist-info/RECORD +120 -0
  120. {desdeo-1.2.dist-info → desdeo-2.0.0.dist-info}/WHEEL +1 -1
  121. desdeo-1.2.dist-info/METADATA +0 -16
  122. desdeo-1.2.dist-info/RECORD +0 -4
@@ -0,0 +1,1036 @@
1
+ """The base class for selection operators.
2
+
3
+ This whole file should be rewritten. Everything is a mess. Moreover, the selectors do not yet take seeds as input for reproducibility.
4
+ TODO: @light-weaver
5
+ """
6
+
7
+ import warnings
8
+ from abc import abstractmethod
9
+ from collections.abc import Sequence
10
+ from enum import Enum
11
+ from itertools import combinations
12
+ from typing import Literal, TypedDict, TypeVar
13
+
14
+ import numpy as np
15
+ import polars as pl
16
+ from scipy.special import comb
17
+ from scipy.stats.qmc import LatinHypercube
18
+
19
+ from desdeo.problem import Problem
20
+ from desdeo.tools import get_corrected_ideal_and_nadir
21
+ from desdeo.tools.message import (
22
+ Array2DMessage,
23
+ DictMessage,
24
+ Message,
25
+ PolarsDataFrameMessage,
26
+ SelectorMessageTopics,
27
+ TerminatorMessageTopics,
28
+ )
29
+ from desdeo.tools.non_dominated_sorting import fast_non_dominated_sort
30
+ from desdeo.tools.patterns import Subscriber
31
+
32
+ SolutionType = TypeVar("SolutionType", list, pl.DataFrame)
33
+
34
+
35
+ class BaseSelector(Subscriber):
36
+ """A base class for selection operators."""
37
+
38
+ def __init__(self, problem: Problem, **kwargs):
39
+ """Initialize a selection operator."""
40
+ super().__init__(**kwargs)
41
+ self.problem = problem
42
+ self.variable_symbols = [x.symbol for x in problem.get_flattened_variables()]
43
+ self.objective_symbols = [x.symbol for x in problem.objectives]
44
+
45
+ if problem.scalarization_funcs is None:
46
+ self.target_symbols = [f"{x.symbol}_min" for x in problem.objectives]
47
+ try:
48
+ ideal, nadir = get_corrected_ideal_and_nadir(problem)
49
+ self.ideal = np.array([ideal[x.symbol] for x in problem.objectives])
50
+ self.nadir = np.array([nadir[x.symbol] for x in problem.objectives]) if nadir is not None else None
51
+ except ValueError: # in case the ideal and nadir are not provided
52
+ self.ideal = None
53
+ self.nadir = None
54
+ else:
55
+ self.target_symbols = [x.symbol for x in problem.scalarization_funcs if x.symbol is not None]
56
+ self.ideal: np.ndarray | None = None
57
+ self.nadir: np.ndarray | None = None
58
+ if problem.constraints is None:
59
+ self.constraints_symbols = None
60
+ else:
61
+ self.constraints_symbols = [x.symbol for x in problem.constraints]
62
+ self.num_dims = len(self.target_symbols)
63
+
64
+ @abstractmethod
65
+ def do(
66
+ self,
67
+ parents: tuple[SolutionType, pl.DataFrame],
68
+ offsprings: tuple[SolutionType, pl.DataFrame],
69
+ ) -> tuple[SolutionType, pl.DataFrame]:
70
+ """Perform the selection operation.
71
+
72
+ Args:
73
+ parents (tuple[SolutionType, pl.DataFrame]): the decision variables as the first element.
74
+ The second element is the objective values, targets, and constraint violations.
75
+ offsprings (tuple[SolutionType, pl.DataFrame]): the decision variables as the first element.
76
+ The second element is the objective values, targets, and constraint violations.
77
+
78
+ Returns:
79
+ tuple[SolutionType, pl.DataFrame]: The selected decision variables and their objective values,
80
+ targets, and constraint violations.
81
+ """
82
+
83
+
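For orientation, here is a minimal standalone sketch (not part of the package) of the `(decision variables, targets)` tuples that `do()` expects. The column names `x_1`, `x_2`, `f_1_min`, `f_2_min` are hypothetical stand-ins for a problem's variable and target symbols.

```python
import polars as pl

# Hypothetical parents/offsprings in the format BaseSelector.do consumes:
# first element holds decision variables, second holds targets (plus any
# objective and constraint-violation columns).
parents = (
    pl.DataFrame({"x_1": [0.1, 0.4], "x_2": [0.9, 0.3]}),
    pl.DataFrame({"f_1_min": [1.2, 0.7], "f_2_min": [0.3, 0.9]}),
)
offsprings = (
    pl.DataFrame({"x_1": [0.2], "x_2": [0.5]}),
    pl.DataFrame({"f_1_min": [0.8], "f_2_min": [0.6]}),
)

# A selector first concatenates the two populations before selecting:
solutions = parents[0].vstack(offsprings[0])
targets = parents[1].vstack(offsprings[1])
print(solutions.shape, targets.shape)  # (3, 2) (3, 2)
```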
84
+ class ReferenceVectorOptions(TypedDict, total=False):
85
+ """The options for the reference vector based selection operators."""
86
+
87
+ adaptation_frequency: int
88
+ """Number of generations between reference vector adaptation. If set to 0, no adaptation occurs. Defaults to 100.
89
+ Only used if `interactive_adaptation` is set to "none"."""
90
+ creation_type: Literal["simplex", "s_energy"]
91
+ """The method for creating reference vectors. Defaults to "simplex".
92
+ Currently only "simplex" is implemented. Future versions will include "s_energy".
93
+
94
+ If set to "simplex", the reference vectors are created using the simplex lattice design method.
95
+ This method generates distributions with specific numbers of reference vectors.
96
+ Check: https://www.itl.nist.gov/div898/handbook/pri/section5/pri542.htm for more information.
97
+
98
+ If set to "s_energy", the reference vectors are created using the Riesz s-energy criterion. This method is used to
99
+ distribute an arbitrary number of reference vectors in the objective space while minimizing the s-energy.
100
+ Currently not implemented.
101
+ """
102
+ vector_type: Literal["spherical", "planar"]
103
+ """The method for normalizing the reference vectors. Defaults to "spherical"."""
104
+ lattice_resolution: int
105
+ """Number of divisions along an axis when creating the simplex lattice. This is not required/used for the "s_energy"
106
+ method. If not specified, the lattice resolution is calculated based on the `number_of_vectors`.
107
+ """
108
+ number_of_vectors: int
109
+ """Number of reference vectors to be created. If "simplex" is selected as the `creation_type`, then the closest
110
+ `lattice_resolution` is calculated based on this value. If "s_energy" is selected, then this value is used directly.
111
+ Note that if neither `lattice_resolution` nor `number_of_vectors` is specified, the number of vectors defaults to
112
+ 500.
113
+ """
114
+ interactive_adaptation: Literal[
115
+ "preferred_solutions", "non_preferred_solutions", "preferred_ranges", "reference_point", "none"
116
+ ]
117
+ """The method for adapting reference vectors based on the Decision maker's preference information.
118
+ Defaults to "none".
119
+ """
120
+ adaptation_distance: float
121
+ """Distance parameter for the interactive adaptation methods. Defaults to 0.2."""
122
+ reference_point: dict[str, float]
123
+ """The reference point for interactive adaptation."""
124
+ preferred_solutions: dict[str, list[float]]
125
+ """The preferred solutions for interactive adaptation."""
126
+ non_preferred_solutions: dict[str, list[float]]
127
+ """The non-preferred solutions for interactive adaptation."""
128
+ preferred_ranges: dict[str, list[float]]
129
+ """The preferred ranges for interactive adaptation."""
130
+
131
+
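As a concrete illustration, a sketch of a filled-in options dictionary for reference-point-driven adaptation. Since `ReferenceVectorOptions` is a `TypedDict`, a plain dict works at runtime; the keys of `reference_point` are hypothetical here and must match the selector's `target_symbols` (e.g. `f"{symbol}_min"` when no scalarization functions are defined).

```python
# Hypothetical options for a bi-objective problem with targets
# "f_1_min" and "f_2_min"; any omitted keys fall back to the defaults
# documented above.
options = {
    "creation_type": "simplex",
    "vector_type": "spherical",
    "number_of_vectors": 100,
    "interactive_adaptation": "reference_point",
    "reference_point": {"f_1_min": 0.2, "f_2_min": 0.8},
    "adaptation_distance": 0.2,
}
```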
132
+ class BaseDecompositionSelector(BaseSelector):
133
+ """Base class for decomposition based selection operators."""
134
+
135
+ def __init__(self, problem: Problem, reference_vector_options: ReferenceVectorOptions, **kwargs):
136
+ super().__init__(problem, **kwargs)
137
+ self.reference_vector_options = reference_vector_options
138
+ self.reference_vectors: np.ndarray
139
+ self.reference_vectors_initial: np.ndarray
140
+
141
+ # Set default values
142
+ if "creation_type" not in self.reference_vector_options:
143
+ self.reference_vector_options["creation_type"] = "simplex"
144
+ if "vector_type" not in self.reference_vector_options:
145
+ self.reference_vector_options["vector_type"] = "spherical"
146
+ if "adaptation_frequency" not in self.reference_vector_options:
147
+ self.reference_vector_options["adaptation_frequency"] = 100
148
+ if self.reference_vector_options["creation_type"] == "simplex":
149
+ self._create_simplex()
150
+ elif self.reference_vector_options["creation_type"] == "s_energy":
151
+ raise NotImplementedError("Riesz s-energy criterion is not yet implemented.")
152
+
153
+ if "interactive_adaptation" not in self.reference_vector_options:
154
+ self.reference_vector_options["interactive_adaptation"] = "none"
155
+ elif self.reference_vector_options["interactive_adaptation"] != "none":
156
+ self.reference_vector_options["adaptation_frequency"] = 0
157
+ if "adaptation_distance" not in self.reference_vector_options:
158
+ self.reference_vector_options["adaptation_distance"] = 0.2
159
+ self._create_simplex()
160
+
161
+ if self.reference_vector_options["interactive_adaptation"] == "reference_point":
162
+ if "reference_point" not in self.reference_vector_options:
163
+ raise ValueError("Reference point must be specified for interactive adaptation.")
164
+ self.interactive_adapt_3(
165
+ np.array([self.reference_vector_options["reference_point"][x] for x in self.target_symbols]),
166
+ translation_param=self.reference_vector_options["adaptation_distance"],
167
+ )
168
+ elif self.reference_vector_options["interactive_adaptation"] == "preferred_solutions":
169
+ if "preferred_solutions" not in self.reference_vector_options:
170
+ raise ValueError("Preferred solutions must be specified for interactive adaptation.")
171
+ self.interactive_adapt_1(
172
+ np.array([self.reference_vector_options["preferred_solutions"][x] for x in self.target_symbols]).T,
173
+ translation_param=self.reference_vector_options["adaptation_distance"],
174
+ )
175
+ elif self.reference_vector_options["interactive_adaptation"] == "non_preferred_solutions":
176
+ if "non_preferred_solutions" not in self.reference_vector_options:
177
+ raise ValueError("Non-preferred solutions must be specified for interactive adaptation.")
178
+ self.interactive_adapt_2(
179
+ np.array([self.reference_vector_options["non_preferred_solutions"][x] for x in self.target_symbols]).T,
180
+ predefined_distance=self.reference_vector_options["adaptation_distance"],
181
+ )
182
+ elif self.reference_vector_options["interactive_adaptation"] == "preferred_ranges":
183
+ if "preferred_ranges" not in self.reference_vector_options:
184
+ raise ValueError("Preferred ranges must be specified for interactive adaptation.")
185
+ self.interactive_adapt_4(
186
+ np.array([self.reference_vector_options["preferred_ranges"][x] for x in self.target_symbols]).T,
187
+ )
188
+
189
+ def _create_simplex(self):
190
+ """Create the reference vectors using simplex lattice design."""
191
+
192
+ def approx_lattice_resolution(number_of_vectors: int, num_dims: int) -> int:
193
+ """Approximate the lattice resolution based on the number of vectors."""
194
+ temp_lattice_resolution = 0
195
+ while True:
196
+ temp_lattice_resolution += 1
197
+ temp_number_of_vectors = comb(
198
+ temp_lattice_resolution + num_dims - 1,
199
+ num_dims - 1,
200
+ exact=True,
201
+ )
202
+ if temp_number_of_vectors > number_of_vectors:
203
+ break
204
+ return temp_lattice_resolution - 1
205
+
206
+ if "lattice_resolution" in self.reference_vector_options:
207
+ lattice_resolution = self.reference_vector_options["lattice_resolution"]
208
+ elif "number_of_vectors" in self.reference_vector_options:
209
+ lattice_resolution = approx_lattice_resolution(
210
+ self.reference_vector_options["number_of_vectors"], num_dims=self.num_dims
211
+ )
212
+ else:
213
+ lattice_resolution = approx_lattice_resolution(500, num_dims=self.num_dims)
214
+
215
+ number_of_vectors: int = comb(
216
+ lattice_resolution + self.num_dims - 1,
217
+ self.num_dims - 1,
218
+ exact=True,
219
+ )
220
+
221
+ self.reference_vector_options["number_of_vectors"] = number_of_vectors
222
+ self.reference_vector_options["lattice_resolution"] = lattice_resolution
223
+
224
+ temp1 = range(1, self.num_dims + lattice_resolution)
225
+ temp1 = np.array(list(combinations(temp1, self.num_dims - 1)))
226
+ temp2 = np.array([range(self.num_dims - 1)] * number_of_vectors)
227
+ temp = temp1 - temp2 - 1
228
+ weight = np.zeros((number_of_vectors, self.num_dims), dtype=int)
229
+ weight[:, 0] = temp[:, 0]
230
+ for i in range(1, self.num_dims - 1):
231
+ weight[:, i] = temp[:, i] - temp[:, i - 1]
232
+ weight[:, -1] = lattice_resolution - temp[:, -1]
233
+ self.reference_vectors = weight / lattice_resolution
234
+ self.reference_vectors_initial = np.copy(self.reference_vectors)
235
+ self._normalize_rvs()
236
+
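A standalone sketch of the simplex-lattice construction above, for m = 3 objectives and lattice resolution H = 4. It produces C(H+m-1, m-1) = C(6, 2) = 15 weight vectors whose components sum to 1.

```python
from itertools import combinations

import numpy as np
from scipy.special import comb

m, H = 3, 4  # number of objectives, lattice resolution
n_vectors = comb(H + m - 1, m - 1, exact=True)  # 15

# Same combinatorial construction as _create_simplex, outside the class.
temp1 = np.array(list(combinations(range(1, H + m), m - 1)))
temp2 = np.array([range(m - 1)] * n_vectors)
temp = temp1 - temp2 - 1
weight = np.zeros((n_vectors, m), dtype=int)
weight[:, 0] = temp[:, 0]
for i in range(1, m - 1):
    weight[:, i] = temp[:, i] - temp[:, i - 1]
weight[:, -1] = H - temp[:, -1]
vectors = weight / H  # rows lie on the unit simplex

assert vectors.shape == (15, 3)
assert np.allclose(vectors.sum(axis=1), 1.0)
```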
237
+ def _normalize_rvs(self):
238
+ """Normalize the reference vectors to a unit hypersphere."""
239
+ if self.reference_vector_options["vector_type"] == "spherical":
240
+ norm = np.linalg.norm(self.reference_vectors, axis=1).reshape(-1, 1)
241
+ norm[norm == 0] = np.finfo(float).eps
242
+ elif self.reference_vector_options["vector_type"] == "planar":
243
+ norm = np.sum(self.reference_vectors, axis=1).reshape(-1, 1)
244
+ else:
245
+ raise ValueError("Invalid vector type. Must be either 'spherical' or 'planar'.")
246
+ self.reference_vectors = np.divide(self.reference_vectors, norm)
247
+
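The two normalization modes can be illustrated standalone: "spherical" scales each vector to unit Euclidean length (unit hypersphere), while "planar" scales the components to sum to 1 (unit simplex).

```python
import numpy as np

vectors = np.array([[2.0, 0.0], [1.0, 1.0], [0.5, 1.5]])

spherical = vectors / np.linalg.norm(vectors, axis=1, keepdims=True)
planar = vectors / vectors.sum(axis=1, keepdims=True)

print(np.linalg.norm(spherical, axis=1))  # all ones
print(planar.sum(axis=1))                 # all ones
```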
248
+ def interactive_adapt_1(self, z: np.ndarray, translation_param: float) -> None:
249
+ """Adapt reference vectors using the information about prefererred solution(s) selected by the Decision maker.
250
+
251
+ Args:
252
+ z (np.ndarray): Preferred solution(s).
253
+ translation_param (float): Parameter determining how close the reference vectors are to the central vector
254
+ **v** defined by using the selected solution(s) z.
255
+ """
256
+ if z.shape[0] == 1:
257
+ # single preferred solution
258
+ # calculate new reference vectors
259
+ self.reference_vectors = translation_param * self.reference_vectors_initial + ((1 - translation_param) * z)
260
+
261
+ else:
262
+ # multiple preferred solutions
263
+ # calculate new reference vectors for each preferred solution
264
+ values = [translation_param * self.reference_vectors_initial + ((1 - translation_param) * z_i) for z_i in z]
265
+
266
+ # combine arrays of reference vectors into a single array and update reference vectors
267
+ self.reference_vectors = np.concatenate(values)
268
+
269
+ self._normalize_rvs()
270
+ self.add_edge_vectors()
271
+
272
+ def interactive_adapt_2(self, z: np.ndarray, predefined_distance: float) -> None:
273
+ """Adapt reference vectors by using the information about non-preferred solution(s) selected by the Decision maker.
274
+
275
+ After the Decision maker has specified non-preferred solution(s), the Euclidean distance between the normalized
276
+ solution vector(s) and each of the reference vectors is calculated. Those reference vectors that are **closer** than a
277
+ predefined distance are either **removed** or **re-positioned** somewhere else.
278
+
279
+ Note:
280
+ At the moment, only the **removal** of reference vectors is supported. Repositioning of the reference
281
+ vectors is **not** supported.
282
+
283
+ Note:
284
+ In case the Decision maker specifies multiple non-preferred solutions, the reference vector(s) for which the
285
+ distance to **any** of the non-preferred solutions is less than the predefined distance are removed.
286
+
287
+ Note:
288
+ A future developer should implement a way for a user to say: "Remove some percentage of the
289
+ objective space/reference vectors" rather than giving a predefined distance value.
290
+
291
+ Args:
292
+ z (np.ndarray): Non-preferred solution(s).
293
+ predefined_distance (float): The reference vectors that are closer than this distance are either removed or
294
+ re-positioned somewhere else.
295
+ Default value: 0.2
296
+ """
297
+ # calculate the L2 (Euclidean) norm of the non-preferred solution(s)
298
+ z = np.atleast_2d(z)
299
+ norm = np.linalg.norm(z, ord=2, axis=1).reshape(np.shape(z)[0], 1)
300
+
301
+ # non-preferred solutions normalized
302
+ v_c = np.divide(z, norm)
303
+
304
+ # distances from non-preferred solution(s) to each reference vector
305
+ distances = np.array(
306
+ [
307
+ list(
308
+ map(
309
+ lambda solution: np.linalg.norm(solution - value, ord=2),
310
+ v_c,
311
+ )
312
+ )
313
+ for value in self.reference_vectors
314
+ ]
315
+ )
316
+
317
+ # find out reference vectors that are not closer than threshold value to any non-preferred solution
318
+ mask = [all(d >= predefined_distance) for d in distances]
319
+
320
+ # set those reference vectors that met previous condition as new reference vectors, drop others
321
+ self.reference_vectors = self.reference_vectors[mask]
322
+
323
+ self._normalize_rvs()
324
+ self.add_edge_vectors()
325
+
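A standalone sketch of what `interactive_adapt_2` computes: drop every reference vector that lies within a given distance of any normalized non-preferred solution. Broadcasting replaces the nested `map`/list-comprehension used above; the data here is made up for illustration.

```python
import numpy as np

rng = np.random.default_rng(42)
reference_vectors = rng.random((10, 3))
reference_vectors /= np.linalg.norm(reference_vectors, axis=1, keepdims=True)

z = np.atleast_2d([[0.9, 0.1, 0.1], [0.1, 0.9, 0.2]])  # non-preferred solutions
v_c = z / np.linalg.norm(z, axis=1, keepdims=True)     # normalized

# pairwise Euclidean distances: shape (n_vectors, n_non_preferred)
distances = np.linalg.norm(reference_vectors[:, None, :] - v_c[None, :, :], axis=2)

mask = (distances >= 0.2).all(axis=1)  # keep vectors far from every z
reference_vectors = reference_vectors[mask]
print(reference_vectors.shape)
```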
326
+ def interactive_adapt_3(self, ref_point, translation_param):
327
+ """Adapt reference vectors linearly towards a reference point. Then normalize.
328
+
329
+ The details can be found in the following paper: Hakanen, Jussi &
330
+ Chugh, Tinkle & Sindhya, Karthik & Jin, Yaochu & Miettinen, Kaisa.
331
+ (2016). Connections of Reference Vectors and Different Types of
332
+ Preference Information in Interactive Multiobjective Evolutionary
333
+ Algorithms.
334
+
335
+ Parameters
336
+ ----------
337
+ ref_point :
338
+
339
+ translation_param :
340
+ (Default value = 0.2)
341
+
342
+ """
343
+ self.reference_vectors = self.reference_vectors_initial * translation_param + (
344
+ (1 - translation_param) * ref_point
345
+ )
346
+ self._normalize_rvs()
347
+ self.add_edge_vectors()
348
+
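A worked standalone example of the linear translation above: v_new = t * v_initial + (1 - t) * r. With t = 0 every vector collapses onto the reference point r; with t = 1 the vectors are unchanged. The values below are hypothetical.

```python
import numpy as np

v_initial = np.array([[1.0, 0.0], [0.0, 1.0], [0.7071, 0.7071]])
r = np.array([0.6, 0.4])  # reference point
t = 0.2                   # translation_param

v_new = t * v_initial + (1 - t) * r
v_new /= np.linalg.norm(v_new, axis=1, keepdims=True)  # spherical normalization
print(v_new)  # vectors clustered around the direction of r
```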
349
+ def interactive_adapt_4(self, preferred_ranges: np.ndarray) -> None:
350
+ """Adapt reference vectors by using the information about the Decision maker's preferred range for each of the objective.
351
+
352
+ Using these ranges, Latin hypercube sampling is applied to generate m number of samples between
353
+ within these ranges, where m is the number of reference vectors. Normalized vectors constructed of these samples
354
+ are then set as new reference vectors.
355
+
356
+ Args:
357
+ preferred_ranges (np.ndarray): Preferred lower and upper bound for each of the objective function values.
358
+ """
359
+ # bounds
360
+ lower_limits = np.min(preferred_ranges, axis=0)
361
+ upper_limits = np.max(preferred_ranges, axis=0)
362
+
363
+ # generate samples using Latin hypercube sampling
364
+ lhs = LatinHypercube(d=self.num_dims)
365
+ w = lhs.random(n=self.reference_vectors_initial.shape[0])
366
+
367
+ # scale between bounds
368
+ w = w * (upper_limits - lower_limits) + lower_limits
369
+
370
+ # set new reference vectors and normalize them
371
+ self.reference_vectors = w
372
+ self._normalize_rvs()
373
+ self.add_edge_vectors()
374
+
375
+ def add_edge_vectors(self):
376
+ """Add edge vectors to the list of reference vectors.
377
+
378
+ Used to cover the entire orthant when preference information is
379
+ provided.
380
+
381
+ """
382
+ edge_vectors = np.eye(self.reference_vectors.shape[1])
383
+ self.reference_vectors = np.vstack([self.reference_vectors, edge_vectors])
384
+ self._normalize_rvs()
385
+
386
+
387
+ class ParameterAdaptationStrategy(Enum):
388
+ """The parameter adaptation strategies for the RVEA selector."""
389
+
390
+ GENERATION_BASED = 1 # Based on the current generation and the maximum generation.
391
+ FUNCTION_EVALUATION_BASED = 2 # Based on the current function evaluation and the maximum function evaluation.
392
+ OTHER = 3 # As of yet undefined strategies.
393
+
394
+
395
+ class RVEASelector(BaseDecompositionSelector):
396
+ @property
397
+ def provided_topics(self):
398
+ return {
399
+ 0: [],
400
+ 1: [
401
+ SelectorMessageTopics.STATE,
402
+ ],
403
+ 2: [
404
+ SelectorMessageTopics.REFERENCE_VECTORS,
405
+ SelectorMessageTopics.STATE,
406
+ SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
407
+ ],
408
+ }
409
+
410
+ @property
411
+ def interested_topics(self):
412
+ return [
413
+ TerminatorMessageTopics.GENERATION,
414
+ TerminatorMessageTopics.MAX_GENERATIONS,
415
+ TerminatorMessageTopics.EVALUATION,
416
+ TerminatorMessageTopics.MAX_EVALUATIONS,
417
+ ]
418
+
419
+ def __init__(
420
+ self,
421
+ problem: Problem,
422
+ alpha: float = 2.0,
423
+ parameter_adaptation_strategy: ParameterAdaptationStrategy = ParameterAdaptationStrategy.GENERATION_BASED,
424
+ reference_vector_options: ReferenceVectorOptions | None = None,
425
+ **kwargs,
426
+ ):
427
+ if not isinstance(parameter_adaptation_strategy, ParameterAdaptationStrategy):
428
+ raise TypeError(f"Parameter adaptation strategy must be of Type {type(ParameterAdaptationStrategy)}")
429
+ if parameter_adaptation_strategy == ParameterAdaptationStrategy.OTHER:
430
+ raise ValueError("Other parameter adaptation strategies are not yet implemented.")
431
+
432
+ if reference_vector_options is None:
433
+ reference_vector_options: ReferenceVectorOptions = ReferenceVectorOptions(
434
+ adaptation_frequency=100,
435
+ creation_type="simplex",
436
+ vector_type="spherical",
437
+ number_of_vectors=500,
438
+ )
439
+
440
+ super().__init__(problem=problem, reference_vector_options=reference_vector_options, **kwargs)
441
+
442
+ self.reference_vectors_gamma: np.ndarray
443
+ self.numerator: float | None = None
444
+ self.denominator: float | None = None
445
+ self.alpha = alpha
446
+ self.selected_individuals: list | pl.DataFrame
447
+ self.selected_targets: pl.DataFrame
448
+ self.selection: list[int]
449
+ self.penalty = None
450
+ self.parameter_adaptation_strategy = parameter_adaptation_strategy
451
+
452
+ def do(
453
+ self,
454
+ parents: tuple[SolutionType, pl.DataFrame],
455
+ offsprings: tuple[SolutionType, pl.DataFrame],
456
+ ) -> tuple[SolutionType, pl.DataFrame]:
457
+ """Perform the selection operation.
458
+
459
+ Args:
460
+ parents (tuple[SolutionType, pl.DataFrame]): the decision variables as the first element.
461
+ The second element is the objective values, targets, and constraint violations.
462
+ offsprings (tuple[SolutionType, pl.DataFrame]): the decision variables as the first element.
463
+ The second element is the objective values, targets, and constraint violations.
464
+
465
+ Returns:
466
+ tuple[SolutionType, pl.DataFrame]: The selected decision variables and their objective values,
467
+ targets, and constraint violations.
468
+ """
469
+ if isinstance(parents[0], pl.DataFrame) and isinstance(offsprings[0], pl.DataFrame):
470
+ solutions = parents[0].vstack(offsprings[0])
471
+ elif isinstance(parents[0], list) and isinstance(offsprings[0], list):
472
+ solutions = parents[0] + offsprings[0]
473
+ else:
474
+ raise TypeError("The decision variables must be either a list or a polars DataFrame, not both")
475
+ alltargets = parents[1].vstack(offsprings[1])
476
+ targets = alltargets[self.target_symbols].to_numpy()
477
+ if self.constraints_symbols is None or len(self.constraints_symbols) == 0:
478
+ constraints = None
479
+ else:
480
+ constraints = (
481
+ parents[1][self.constraints_symbols].vstack(offsprings[1][self.constraints_symbols]).to_numpy()
482
+ )
483
+
484
+ if self.ideal is None:
485
+ self.ideal = np.min(targets, axis=0)
486
+ else:
487
+ self.ideal = np.min(np.vstack((self.ideal, np.min(targets, axis=0))), axis=0)
488
+ partial_penalty_factor = self._partial_penalty_factor()
489
+ self._adapt()
490
+
491
+ ref_vectors = self.adapted_reference_vectors
492
+ # Normalization - There may be problems here
493
+ translated_targets = targets - self.ideal
494
+ targets_norm = np.linalg.norm(translated_targets, axis=1)
495
+ # TODO check if you need the next line
496
+ # TODO changing the order of the following few operations might be efficient
497
+ targets_norm = np.repeat(targets_norm, len(translated_targets[0, :])).reshape(translated_targets.shape)
498
+ # Convert zeros to eps to avoid divide by zero.
499
+ # Has to be checked!
500
+ targets_norm[targets_norm == 0] = np.finfo(float).eps
501
+ normalized_targets = np.divide(translated_targets, targets_norm) # Checked, works.
502
+ cosine = np.dot(normalized_targets, np.transpose(ref_vectors))
503
+ if cosine[np.where(cosine > 1)].size:
504
+ cosine[np.where(cosine > 1)] = 1
505
+ if cosine[np.where(cosine < 0)].size:
506
+ cosine[np.where(cosine < 0)] = 0
507
+ # Calculation of angles between reference vectors and solutions
508
+ theta = np.arccos(cosine)
509
+ # Reference vector assignment
510
+ assigned_vectors = np.argmax(cosine, axis=1)
511
+ selection = np.array([], dtype=int)
512
+ # Selection
513
+ # Convert zeros to eps to avoid divide by zero.
514
+ # Has to be checked!
515
+ ref_vectors[ref_vectors == 0] = np.finfo(float).eps
516
+ for i in range(len(ref_vectors)):
517
+ sub_population_index = np.atleast_1d(np.squeeze(np.where(assigned_vectors == i)))
518
+
519
+ # Constraint check
520
+ if len(sub_population_index) > 1 and constraints is not None:
521
+ violation_values = constraints[sub_population_index]
522
+ # violation_values = -violation_values
523
+ violation_values = np.maximum(0, violation_values)
524
+ # True if feasible
525
+ feasible_bool = (violation_values == 0).all(axis=1)
526
+
527
+ # Case when entire subpopulation is infeasible
528
+ if not feasible_bool.any():
529
+ violation_values = violation_values.sum(axis=1)
530
+ sub_population_index = sub_population_index[np.where(violation_values == violation_values.min())]
531
+ # Case when only some are infeasible
532
+ else:
533
+ sub_population_index = sub_population_index[feasible_bool]
534
+
535
+ sub_population_fitness = translated_targets[sub_population_index]
536
+ # fast tracking singly selected individuals
537
+ if len(sub_population_index) == 1:
538
+ selx = sub_population_index
539
+ if selection.shape[0] == 0:
540
+ selection = np.hstack((selection, np.transpose(selx[0])))
541
+ else:
542
+ selection = np.vstack((selection, np.transpose(selx[0])))
543
+ elif len(sub_population_index) > 1:
544
+ # APD Calculation
545
+ angles = theta[sub_population_index, i]
546
+ angles = np.divide(angles, self.reference_vectors_gamma[i]) # This is correct.
547
+ # You have done this calculation before. Check with fitness_norm
548
+ # Remove this horrible line
549
+ sub_pop_fitness_magnitude = np.sqrt(np.sum(np.power(sub_population_fitness, 2), axis=1))
550
+ apd = np.multiply(
551
+ np.transpose(sub_pop_fitness_magnitude),
552
+ (1 + np.dot(partial_penalty_factor, angles)),
553
+ )
554
+ if np.isnan(apd).all():  # skip this reference vector if there are no valid APD values
555
+ continue
556
+ minidx = np.where(apd == np.nanmin(apd))
557
+ selx = sub_population_index[minidx]
558
+ if selection.shape[0] == 0:
559
+ selection = np.hstack((selection, np.transpose(selx[0])))
560
+ else:
561
+ selection = np.vstack((selection, np.transpose(selx[0])))
562
+
563
+ self.selection = selection.tolist()
564
+ self.selected_individuals = solutions[selection.flatten()]
565
+ self.selected_targets = alltargets[selection.flatten()]
566
+ self.notify()
567
+ return self.selected_individuals, self.selected_targets
568
+
569
+ def _partial_penalty_factor(self) -> float:
570
+ """Calculate and return the partial penalty factor for APD calculation.
571
+
572
+ This calculation does not include the angle related terms, hence the name.
573
+ If the calculated penalty is outside [0, 1], it is clipped to 0 or 1.
574
+
575
+ Returns:
576
+ float: The partial penalty factor
577
+ """
578
+ if self.numerator is None or self.denominator is None or self.denominator == 0:
579
+ raise RuntimeError("Numerator and denominator must be set before calculating the partial penalty factor.")
580
+ penalty = self.numerator / self.denominator
581
+ penalty = float(np.clip(penalty, 0, 1))
582
+ self.penalty = (penalty**self.alpha) * self.reference_vectors.shape[1]
583
+ return self.penalty
584
+
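A standalone sketch of RVEA's angle-penalized distance (APD), which combines the partial penalty factor above with the angle terms used in `do`: APD = ||f|| * (1 + P(t) * theta / gamma), where P(t) = m * (t / t_max) ** alpha grows over the run, so selection emphasizes convergence (small ||f||) early and diversity (small angle theta) later. All values below are hypothetical.

```python
import numpy as np

m, alpha = 3, 2.0   # number of objectives, alpha as in RVEASelector
t, t_max = 50, 100  # current and maximum generation
penalty = m * (t / t_max) ** alpha  # partial penalty factor: 3 * 0.25 = 0.75

f_norm = np.array([1.0, 1.2])   # distances of two solutions to the ideal
theta = np.array([0.30, 0.05])  # angles to the assigned reference vector
gamma = 0.25                    # smallest angle between neighboring vectors

apd = f_norm * (1 + penalty * theta / gamma)
print(apd)  # [1.9, 1.38] -> the second solution wins despite larger ||f||
```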
585
+ def update(self, message: Message) -> None:
586
+ """Update the parameters of the RVEA APD calculation.
587
+
588
+ Args:
589
+ message (Message): The message to update the parameters. The message should be coming from the
590
+ Terminator operator (via the Publisher).
591
+ """
592
+ if not isinstance(message.topic, TerminatorMessageTopics):
593
+ return
594
+ if not isinstance(message.value, int):
595
+ return
596
+ if self.parameter_adaptation_strategy == ParameterAdaptationStrategy.GENERATION_BASED:
597
+ if message.topic == TerminatorMessageTopics.GENERATION:
598
+ self.numerator = message.value
599
+ if message.topic == TerminatorMessageTopics.MAX_GENERATIONS:
600
+ self.denominator = message.value
601
+ elif self.parameter_adaptation_strategy == ParameterAdaptationStrategy.FUNCTION_EVALUATION_BASED:
602
+ if message.topic == TerminatorMessageTopics.EVALUATION:
603
+ self.numerator = message.value
604
+ if message.topic == TerminatorMessageTopics.MAX_EVALUATIONS:
605
+ self.denominator = message.value
606
+ return
607
+
608
+ def state(self) -> Sequence[Message]:
609
+ if self.verbosity == 0 or self.selection is None:
610
+ return []
611
+ if self.verbosity == 1:
612
+ return [
613
+ Array2DMessage(
614
+ topic=SelectorMessageTopics.REFERENCE_VECTORS,
615
+ value=self.reference_vectors.tolist(),
616
+ source=self.__class__.__name__,
617
+ ),
618
+ DictMessage(
619
+ topic=SelectorMessageTopics.STATE,
620
+ value={
621
+ "ideal": self.ideal,
622
+ "nadir": self.nadir,
623
+ "partial_penalty_factor": self._partial_penalty_factor(),
624
+ },
625
+ source=self.__class__.__name__,
626
+ ),
627
+ ]  # the code below handles verbosity == 2
628
+ if isinstance(self.selected_individuals, pl.DataFrame):
629
+ message = PolarsDataFrameMessage(
630
+ topic=SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
631
+ value=pl.concat([self.selected_individuals, self.selected_targets], how="horizontal"),
632
+ source=self.__class__.__name__,
633
+ )
634
+ else:
635
+ warnings.warn("Population is not a Polars DataFrame. Defaulting to providing OUTPUTS only.", stacklevel=2)
636
+ message = PolarsDataFrameMessage(
637
+ topic=SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
638
+ value=self.selected_targets,
639
+ source=self.__class__.__name__,
640
+ )
641
+ state_verbose = [
642
+ Array2DMessage(
643
+ topic=SelectorMessageTopics.REFERENCE_VECTORS,
644
+ value=self.reference_vectors.tolist(),
645
+ source=self.__class__.__name__,
646
+ ),
647
+ DictMessage(
648
+ topic=SelectorMessageTopics.STATE,
649
+ value={
650
+ "ideal": self.ideal,
651
+ "nadir": self.nadir,
652
+ "partial_penalty_factor": self._partial_penalty_factor(),
653
+ },
654
+ source=self.__class__.__name__,
655
+ ),
656
+ # DictMessage(
657
+ # topic=SelectorMessageTopics.SELECTED_INDIVIDUALS,
658
+ # value=self.selection[0].tolist(),
659
+ # source=self.__class__.__name__,
660
+ # ),
661
+ message,
662
+ ]
663
+ return state_verbose
664
+
665
+ def _adapt(self):
666
+ self.adapted_reference_vectors = np.copy(self.reference_vectors)  # copy to avoid mutating the originals in place
667
+ if self.ideal is not None and self.nadir is not None:
668
+ for i in range(self.reference_vectors.shape[0]):
669
+ self.adapted_reference_vectors[i] = self.reference_vectors[i] * (self.nadir - self.ideal)
670
+ self.adapted_reference_vectors = (
671
+ self.adapted_reference_vectors / np.linalg.norm(self.adapted_reference_vectors, axis=1)[:, None]
672
+ )
673
+
674
+ # More efficient way to calculate the gamma values
675
+ self.reference_vectors_gamma = np.arccos(
676
+ np.dot(self.adapted_reference_vectors, np.transpose(self.adapted_reference_vectors))
677
+ )
678
+ self.reference_vectors_gamma[np.where(self.reference_vectors_gamma == 0)] = np.inf
679
+ self.reference_vectors_gamma = np.min(self.reference_vectors_gamma, axis=1)
680
+
681
+
682
+ class NSGAIII_select(BaseDecompositionSelector):
683
+ """The NSGA-III selection operator. Code is heavily based on the version of nsga3 in the pymoo package by msu-coinlab.
684
+
685
+ Parameters
686
+ ----------
687
+ pop : Population
688
+ [description]
689
+ n_survive : int, optional
690
+ [description], by default None
691
+
692
+ """
693
+
694
+ @property
695
+ def provided_topics(self):
696
+ return {
697
+ 0: [],
698
+ 1: [
699
+ SelectorMessageTopics.STATE,
700
+ ],
701
+ 2: [
702
+ SelectorMessageTopics.REFERENCE_VECTORS,
703
+ SelectorMessageTopics.STATE,
704
+ SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
705
+ ],
706
+ }
707
+
708
+ @property
709
+ def interested_topics(self):
710
+ return []
711
+
712
+ def __init__(
713
+ self,
714
+ problem: Problem,
715
+ reference_vector_options: ReferenceVectorOptions | None = None,
716
+ **kwargs,
717
+ ):
718
+ if reference_vector_options is None:
719
+ reference_vector_options: ReferenceVectorOptions = ReferenceVectorOptions(
720
+ adaptation_frequency=0,
721
+ creation_type="simplex",
722
+ vector_type="planar",
723
+ number_of_vectors=500,
724
+ )
725
+ super().__init__(problem, reference_vector_options=reference_vector_options, **kwargs)
726
+ self.adapted_reference_vectors = None
727
+ self.worst_fitness: np.ndarray | None = None
728
+ self.extreme_points: np.ndarray | None = None
729
+ self.n_survive = self.reference_vectors.shape[0]
730
+ self.selection: list[int] | None = None
731
+ self.selected_individuals: SolutionType | None = None
732
+ self.selected_targets: pl.DataFrame | None = None
733
+
734
+ def do(
735
+ self,
736
+ parents: tuple[SolutionType, pl.DataFrame],
737
+ offsprings: tuple[SolutionType, pl.DataFrame],
738
+ ) -> tuple[SolutionType, pl.DataFrame]:
739
+ """Perform the selection operation.
740
+
741
+ Args:
742
+ parents (tuple[SolutionType, pl.DataFrame]): the decision variables as the first element.
743
+ The second element is the objective values, targets, and constraint violations.
744
+ offsprings (tuple[SolutionType, pl.DataFrame]): the decision variables as the first element.
745
+ The second element is the objective values, targets, and constraint violations.
746
+
747
+ Returns:
748
+ tuple[SolutionType, pl.DataFrame]: The selected decision variables and their objective values,
749
+ targets, and constraint violations.
750
+ """
751
+ if isinstance(parents[0], pl.DataFrame) and isinstance(offsprings[0], pl.DataFrame):
752
+ solutions = parents[0].vstack(offsprings[0])
753
+ elif isinstance(parents[0], list) and isinstance(offsprings[0], list):
754
+ solutions = parents[0] + offsprings[0]
755
+ else:
756
+ raise TypeError("The decision variables must be either a list or a polars DataFrame, not both")
757
+ alltargets = parents[1].vstack(offsprings[1])
758
+ targets = alltargets[self.target_symbols].to_numpy()
759
+ if self.constraints_symbols is None:
760
+ constraints = None
761
+ else:
762
+ constraints = (
763
+ parents[1][self.constraints_symbols].vstack(offsprings[1][self.constraints_symbols]).to_numpy()
764
+ )
765
+ ref_dirs = self.reference_vectors
766
+
767
+ if self.ideal is None:
768
+ self.ideal = np.min(targets, axis=0)
769
+ else:
770
+ self.ideal = np.min(np.vstack((self.ideal, np.min(targets, axis=0))), axis=0)
771
+ fitness = targets
772
+ # Calculating fronts and ranks
773
+ # fronts, dl, dc, rank = nds(fitness)
774
+ fronts = fast_non_dominated_sort(fitness)
775
+ fronts = [np.where(fronts[i])[0] for i in range(len(fronts))]
776
+ non_dominated = fronts[0]
777
+
778
+ if self.worst_fitness is None:
779
+ self.worst_fitness = np.max(fitness, axis=0)
780
+ else:
781
+ self.worst_fitness = np.amax(np.vstack((self.worst_fitness, fitness)), axis=0)
782
+
783
+ # Calculating worst points
784
+ worst_of_population = np.amax(fitness, axis=0)
785
+ worst_of_front = np.max(fitness[non_dominated, :], axis=0)
786
+ self.extreme_points = self.get_extreme_points_c(
787
+ fitness[non_dominated, :], self.ideal, extreme_points=self.extreme_points
788
+ )
789
+ self.nadir_point = nadir_point = self.get_nadir_point(
790
+ self.extreme_points,
791
+ self.ideal,
792
+ self.worst_fitness,
793
+ worst_of_front,  # note: argument order matches the get_nadir_point signature below
794
+ worst_of_population,
795
+ )
796
+
797
+ # Finding individuals in first 'n' fronts
798
+ selection = np.asarray([], dtype=int)
799
+ for front_id in range(len(fronts)):
800
+ if len(np.concatenate(fronts[: front_id + 1])) < self.n_survive:
801
+ continue
802
+ else:
803
+ fronts = fronts[: front_id + 1]
804
+ selection = np.concatenate(fronts)
805
+ break
806
+ F = fitness[selection]
807
+
808
+ last_front = fronts[-1]
809
+
810
+ # Selecting individuals from the last acceptable front.
811
+ if len(selection) > self.n_survive:
812
+ niche_of_individuals, dist_to_niche = self.associate_to_niches(F, ref_dirs, self.ideal, nadir_point)
813
+ # if there is only one front
814
+ if len(fronts) == 1:
815
+ n_remaining = self.n_survive
816
+ until_last_front = np.array([], dtype=int)
817
+ niche_count = np.zeros(len(ref_dirs), dtype=int)
818
+
819
+ # if some individuals already survived
820
+ else:
821
+ until_last_front = np.concatenate(fronts[:-1])
822
+ id_until_last_front = list(range(len(until_last_front)))
823
+ niche_count = self.calc_niche_count(len(ref_dirs), niche_of_individuals[id_until_last_front])
824
+ n_remaining = self.n_survive - len(until_last_front)
825
+
826
+ last_front_selection_id = list(range(len(until_last_front), len(selection)))
827
+ if np.any(selection[last_front_selection_id] != last_front):
828
+ print("error!!!")
829
+ selected_from_last_front = self.niching(
830
+ fitness[last_front, :],
831
+ n_remaining,
832
+ niche_count,
833
+ niche_of_individuals[last_front_selection_id],
834
+ dist_to_niche[last_front_selection_id],
835
+ )
836
+ final_selection = np.concatenate((until_last_front, last_front[selected_from_last_front]))
837
+ if self.extreme_points is None:
838
+ print("Error")
839
+ if final_selection is None:
840
+ print("Error")
841
+ else:
842
+ final_selection = selection
843
+
844
+ self.selection = final_selection.tolist()
845
+ if isinstance(solutions, pl.DataFrame) and self.selection is not None:
846
+ self.selected_individuals = solutions[self.selection]
847
+ elif isinstance(solutions, list) and self.selection is not None:
848
+ self.selected_individuals = [solutions[i] for i in self.selection]
849
+ else:
850
+ raise RuntimeError("Something went wrong with the selection")
851
+ self.selected_targets = alltargets[self.selection]
852
+
853
+ self.notify()
854
+ return self.selected_individuals, self.selected_targets
855
+
856
+ def get_extreme_points_c(self, F, ideal_point, extreme_points=None):
857
+ """Taken from pymoo"""
858
+ # calculate the asf which is used for the extreme point decomposition
859
+ asf = np.eye(F.shape[1])
860
+ asf[asf == 0] = 1e6
861
+
862
+ # add the old extreme points to never lose them for normalization
863
+ _F = F
864
+ if extreme_points is not None:
865
+ _F = np.concatenate([extreme_points, _F], axis=0)
866
+
867
+ # use __F because we substitute small values to be 0
868
+ __F = _F - ideal_point
869
+ __F[__F < 1e-3] = 0
870
+
871
+ # update the extreme points used for normalization: for each objective
872
+ # axis, keep the point with the smallest achievement scalarization (asf) value
873
+ F_asf = np.max(__F * asf[:, None, :], axis=2)
874
+ I = np.argmin(F_asf, axis=1)
875
+ extreme_points = _F[I, :]
876
+ return extreme_points
877
+
878
+ def get_nadir_point(
879
+ self,
880
+ extreme_points,
881
+ ideal_point,
882
+ worst_point,
883
+ worst_of_front,
884
+ worst_of_population,
885
+ ):
886
+ LinAlgError = np.linalg.LinAlgError
887
+ try:
888
+ # find the intercepts using gaussian elimination
889
+ M = extreme_points - ideal_point
890
+ b = np.ones(extreme_points.shape[1])
891
+ plane = np.linalg.solve(M, b)
892
+ intercepts = 1 / plane
893
+
894
+ nadir_point = ideal_point + intercepts
895
+
896
+ if not np.allclose(np.dot(M, plane), b) or np.any(intercepts <= 1e-6) or np.any(nadir_point > worst_point):
897
+ raise LinAlgError()
898
+
899
+ except LinAlgError:
900
+ nadir_point = worst_of_front
901
+
902
+ b = nadir_point - ideal_point <= 1e-6
903
+ nadir_point[b] = worst_of_population[b]
904
+ return nadir_point
905
+
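A standalone sketch of the intercept-based nadir estimate in `get_nadir_point`: solve M @ plane = 1 for the hyperplane through the (ideal-translated) extreme points; the axis intercepts 1 / plane then give the nadir offsets from the ideal. The extreme points below are hypothetical.

```python
import numpy as np

ideal_point = np.array([0.0, 0.0])
extreme_points = np.array([[2.0, 0.5], [0.4, 3.0]])

M = extreme_points - ideal_point
b = np.ones(M.shape[1])
plane = np.linalg.solve(M, b)  # hyperplane through the extreme points
intercepts = 1 / plane
nadir_point = ideal_point + intercepts
print(nadir_point)  # where the extreme-point hyperplane crosses each axis
```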
906
+ def niching(self, F, n_remaining, niche_count, niche_of_individuals, dist_to_niche):
907
+ survivors = []
908
+
909
+ # boolean array of elements that are considered for each iteration
910
+ mask = np.full(F.shape[0], True)
911
+
912
+ while len(survivors) < n_remaining:
913
+ # all niches where new individuals can be assigned to
914
+ next_niches_list = np.unique(niche_of_individuals[mask])
915
+
916
+ # pick a niche with minimum assigned individuals - break tie if necessary
917
+ next_niche_count = niche_count[next_niches_list]
918
+ next_niche = np.where(next_niche_count == next_niche_count.min())[0]
919
+ next_niche = next_niches_list[next_niche]
920
+ next_niche = next_niche[np.random.randint(0, len(next_niche))]
921
+
922
+ # indices of individuals that are considered and assign to next_niche
923
+ next_ind = np.where(np.logical_and(niche_of_individuals == next_niche, mask))[0]
924
+
925
+ # shuffle to break random tie (equal perp. dist) or select randomly
926
+ np.random.shuffle(next_ind)
927
+
928
+ if niche_count[next_niche] == 0:
929
+ next_ind = next_ind[np.argmin(dist_to_niche[next_ind])]
930
+ else:
931
+ # already randomized through shuffling
932
+ next_ind = next_ind[0]
933
+
934
+ mask[next_ind] = False
935
+ survivors.append(int(next_ind))
936
+
937
+ niche_count[next_niche] += 1
938
+
939
+ return survivors
940
+
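A compact standalone sketch of the niching loop above: fill the remaining slots by repeatedly taking a least-crowded niche, preferring the closest individual when the niche is still empty and a random member otherwise. The data is made up for illustration, and a seeded generator stands in for the unseeded `np.random` calls used above.

```python
import numpy as np

rng = np.random.default_rng(0)
niche_of_individuals = np.array([0, 0, 1, 2, 2, 2])  # niche index per candidate
dist_to_niche = np.array([0.1, 0.3, 0.2, 0.5, 0.4, 0.6])
niche_count = np.array([1, 0, 0])  # niche 0 already has one survivor
n_remaining = 2

survivors, mask = [], np.full(len(niche_of_individuals), True)
while len(survivors) < n_remaining:
    available = np.unique(niche_of_individuals[mask])
    counts = niche_count[available]
    niche = rng.choice(available[counts == counts.min()])  # least-crowded niche
    members = np.where((niche_of_individuals == niche) & mask)[0]
    if niche_count[niche] == 0:
        pick = members[np.argmin(dist_to_niche[members])]  # closest individual
    else:
        pick = rng.choice(members)  # random tie-break in occupied niches
    mask[pick] = False
    survivors.append(int(pick))
    niche_count[niche] += 1

print(survivors)
```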
941
+ def associate_to_niches(self, F, ref_dirs, ideal_point, nadir_point, utopian_epsilon=0.0):
942
+ utopian_point = ideal_point - utopian_epsilon
943
+
944
+ denom = nadir_point - utopian_point
945
+ denom[denom == 0] = 1e-12
946
+
947
+ # normalize by ideal point and intercepts
948
+ N = (F - utopian_point) / denom
949
+ dist_matrix = self.calc_perpendicular_distance(N, ref_dirs)
950
+
951
+ niche_of_individuals = np.argmin(dist_matrix, axis=1)
952
+ dist_to_niche = dist_matrix[np.arange(F.shape[0]), niche_of_individuals]
953
+
954
+ return niche_of_individuals, dist_to_niche
955
+
956
+ def calc_niche_count(self, n_niches, niche_of_individuals):
957
+ niche_count = np.zeros(n_niches, dtype=int)
958
+ index, count = np.unique(niche_of_individuals, return_counts=True)
959
+ niche_count[index] = count
960
+ return niche_count
961
+
962
+ def calc_perpendicular_distance(self, N, ref_dirs):
963
+ u = np.tile(ref_dirs, (len(N), 1))
964
+ v = np.repeat(N, len(ref_dirs), axis=0)
965
+
966
+ norm_u = np.linalg.norm(u, axis=1)
967
+
968
+ scalar_proj = np.sum(v * u, axis=1) / norm_u
969
+ proj = scalar_proj[:, None] * u / norm_u[:, None]
970
+ val = np.linalg.norm(proj - v, axis=1)
971
+ matrix = np.reshape(val, (len(N), len(ref_dirs)))
972
+
973
+ return matrix
974
+
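A standalone check of `calc_perpendicular_distance`: the distance from each normalized point to the ray spanned by each reference direction, computed by subtracting the point's projection onto the direction. The inputs are hypothetical.

```python
import numpy as np

ref_dirs = np.array([[1.0, 0.0], [0.7071, 0.7071]])
N = np.array([[0.5, 0.5], [0.9, 0.1]])

u = np.tile(ref_dirs, (len(N), 1))       # repeat directions per point
v = np.repeat(N, len(ref_dirs), axis=0)  # repeat each point per direction
norm_u = np.linalg.norm(u, axis=1)
scalar_proj = np.sum(v * u, axis=1) / norm_u
proj = scalar_proj[:, None] * u / norm_u[:, None]
dist = np.linalg.norm(proj - v, axis=1).reshape(len(N), len(ref_dirs))

print(dist)  # point [0.5, 0.5] lies on the diagonal direction -> distance ~0
```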
975
+ def state(self) -> Sequence[Message]:
976
+ if self.verbosity == 0 or self.selection is None or self.selected_targets is None:
977
+ return []
978
+ if self.verbosity == 1:
979
+ return [
980
+ Array2DMessage(
981
+ topic=SelectorMessageTopics.REFERENCE_VECTORS,
982
+ value=self.reference_vectors.tolist(),
983
+ source=self.__class__.__name__,
984
+ ),
985
+ DictMessage(
986
+ topic=SelectorMessageTopics.STATE,
987
+ value={
988
+ "ideal": self.ideal,
989
+ "nadir": self.worst_fitness,
990
+ "extreme_points": self.extreme_points,
991
+ "n_survive": self.n_survive,
992
+ },
993
+ source=self.__class__.__name__,
994
+ ),
995
+ ]
996
+ # verbosity == 2
997
+ if isinstance(self.selected_individuals, pl.DataFrame):
998
+ message = PolarsDataFrameMessage(
999
+ topic=SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
1000
+ value=pl.concat([self.selected_individuals, self.selected_targets], how="horizontal"),
1001
+ source=self.__class__.__name__,
1002
+ )
1003
+ else:
1004
+ warnings.warn("Population is not a Polars DataFrame. Defaulting to providing OUTPUTS only.", stacklevel=2)
1005
+ message = PolarsDataFrameMessage(
1006
+ topic=SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
1007
+ value=self.selected_targets,
1008
+ source=self.__class__.__name__,
1009
+ )
1010
+ state_verbose = [
1011
+ Array2DMessage(
1012
+ topic=SelectorMessageTopics.REFERENCE_VECTORS,
1013
+ value=self.reference_vectors.tolist(),
1014
+ source=self.__class__.__name__,
1015
+ ),
1016
+ DictMessage(
1017
+ topic=SelectorMessageTopics.STATE,
1018
+ value={
1019
+ "ideal": self.ideal,
1020
+ "nadir": self.worst_fitness,
1021
+ "extreme_points": self.extreme_points,
1022
+ "n_survive": self.n_survive,
1023
+ },
1024
+ source=self.__class__.__name__,
1025
+ ),
1026
+ # Array2DMessage(
1027
+ # topic=SelectorMessageTopics.SELECTED_INDIVIDUALS,
1028
+ # value=self.selected_individuals,
1029
+ # source=self.__class__.__name__,
1030
+ # ),
1031
+ message,
1032
+ ]
1033
+ return state_verbose
1034
+
1035
+ def update(self, message: Message) -> None:
1036
+ pass