desdeo 2.0.0__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126) hide show
  1. desdeo/adm/ADMAfsar.py +551 -0
  2. desdeo/adm/ADMChen.py +414 -0
  3. desdeo/adm/BaseADM.py +119 -0
  4. desdeo/adm/__init__.py +11 -0
  5. desdeo/api/__init__.py +6 -6
  6. desdeo/api/app.py +38 -28
  7. desdeo/api/config.py +65 -44
  8. desdeo/api/config.toml +23 -12
  9. desdeo/api/db.py +10 -8
  10. desdeo/api/db_init.py +12 -6
  11. desdeo/api/models/__init__.py +220 -20
  12. desdeo/api/models/archive.py +16 -27
  13. desdeo/api/models/emo.py +128 -0
  14. desdeo/api/models/enautilus.py +69 -0
  15. desdeo/api/models/gdm/gdm_aggregate.py +139 -0
  16. desdeo/api/models/gdm/gdm_base.py +69 -0
  17. desdeo/api/models/gdm/gdm_score_bands.py +114 -0
  18. desdeo/api/models/gdm/gnimbus.py +138 -0
  19. desdeo/api/models/generic.py +104 -0
  20. desdeo/api/models/generic_states.py +401 -0
  21. desdeo/api/models/nimbus.py +158 -0
  22. desdeo/api/models/preference.py +44 -6
  23. desdeo/api/models/problem.py +274 -64
  24. desdeo/api/models/session.py +4 -1
  25. desdeo/api/models/state.py +419 -52
  26. desdeo/api/models/user.py +7 -6
  27. desdeo/api/models/utopia.py +25 -0
  28. desdeo/api/routers/_EMO.backup +309 -0
  29. desdeo/api/routers/_NIMBUS.py +6 -3
  30. desdeo/api/routers/emo.py +497 -0
  31. desdeo/api/routers/enautilus.py +237 -0
  32. desdeo/api/routers/gdm/gdm_aggregate.py +234 -0
  33. desdeo/api/routers/gdm/gdm_base.py +420 -0
  34. desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_manager.py +398 -0
  35. desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_routers.py +377 -0
  36. desdeo/api/routers/gdm/gnimbus/gnimbus_manager.py +698 -0
  37. desdeo/api/routers/gdm/gnimbus/gnimbus_routers.py +591 -0
  38. desdeo/api/routers/generic.py +233 -0
  39. desdeo/api/routers/nimbus.py +705 -0
  40. desdeo/api/routers/problem.py +201 -4
  41. desdeo/api/routers/reference_point_method.py +20 -44
  42. desdeo/api/routers/session.py +50 -26
  43. desdeo/api/routers/user_authentication.py +180 -26
  44. desdeo/api/routers/utils.py +187 -0
  45. desdeo/api/routers/utopia.py +230 -0
  46. desdeo/api/schema.py +10 -4
  47. desdeo/api/tests/conftest.py +94 -2
  48. desdeo/api/tests/test_enautilus.py +330 -0
  49. desdeo/api/tests/test_models.py +550 -72
  50. desdeo/api/tests/test_routes.py +902 -43
  51. desdeo/api/utils/_database.py +263 -0
  52. desdeo/api/utils/database.py +28 -266
  53. desdeo/api/utils/emo_database.py +40 -0
  54. desdeo/core.py +7 -0
  55. desdeo/emo/__init__.py +154 -24
  56. desdeo/emo/hooks/archivers.py +18 -2
  57. desdeo/emo/methods/EAs.py +128 -5
  58. desdeo/emo/methods/bases.py +9 -56
  59. desdeo/emo/methods/templates.py +111 -0
  60. desdeo/emo/operators/crossover.py +544 -42
  61. desdeo/emo/operators/evaluator.py +10 -14
  62. desdeo/emo/operators/generator.py +127 -24
  63. desdeo/emo/operators/mutation.py +212 -41
  64. desdeo/emo/operators/scalar_selection.py +202 -0
  65. desdeo/emo/operators/selection.py +956 -214
  66. desdeo/emo/operators/termination.py +124 -16
  67. desdeo/emo/options/__init__.py +108 -0
  68. desdeo/emo/options/algorithms.py +435 -0
  69. desdeo/emo/options/crossover.py +164 -0
  70. desdeo/emo/options/generator.py +131 -0
  71. desdeo/emo/options/mutation.py +260 -0
  72. desdeo/emo/options/repair.py +61 -0
  73. desdeo/emo/options/scalar_selection.py +66 -0
  74. desdeo/emo/options/selection.py +127 -0
  75. desdeo/emo/options/templates.py +383 -0
  76. desdeo/emo/options/termination.py +143 -0
  77. desdeo/gdm/__init__.py +22 -0
  78. desdeo/gdm/gdmtools.py +45 -0
  79. desdeo/gdm/score_bands.py +114 -0
  80. desdeo/gdm/voting_rules.py +50 -0
  81. desdeo/mcdm/__init__.py +23 -1
  82. desdeo/mcdm/enautilus.py +338 -0
  83. desdeo/mcdm/gnimbus.py +484 -0
  84. desdeo/mcdm/nautilus_navigator.py +7 -6
  85. desdeo/mcdm/reference_point_method.py +70 -0
  86. desdeo/problem/__init__.py +5 -1
  87. desdeo/problem/external/__init__.py +18 -0
  88. desdeo/problem/external/core.py +356 -0
  89. desdeo/problem/external/pymoo_provider.py +266 -0
  90. desdeo/problem/external/runtime.py +44 -0
  91. desdeo/problem/infix_parser.py +2 -2
  92. desdeo/problem/pyomo_evaluator.py +25 -6
  93. desdeo/problem/schema.py +69 -48
  94. desdeo/problem/simulator_evaluator.py +65 -15
  95. desdeo/problem/testproblems/__init__.py +26 -11
  96. desdeo/problem/testproblems/benchmarks_server.py +120 -0
  97. desdeo/problem/testproblems/cake_problem.py +185 -0
  98. desdeo/problem/testproblems/dmitry_forest_problem_discrete.py +71 -0
  99. desdeo/problem/testproblems/forest_problem.py +77 -69
  100. desdeo/problem/testproblems/multi_valued_constraints.py +119 -0
  101. desdeo/problem/testproblems/{river_pollution_problem.py → river_pollution_problems.py} +28 -22
  102. desdeo/problem/testproblems/single_objective.py +289 -0
  103. desdeo/problem/testproblems/zdt_problem.py +4 -1
  104. desdeo/tools/__init__.py +39 -21
  105. desdeo/tools/desc_gen.py +22 -0
  106. desdeo/tools/generics.py +22 -2
  107. desdeo/tools/group_scalarization.py +3090 -0
  108. desdeo/tools/indicators_binary.py +107 -1
  109. desdeo/tools/indicators_unary.py +3 -16
  110. desdeo/tools/message.py +33 -2
  111. desdeo/tools/non_dominated_sorting.py +4 -3
  112. desdeo/tools/patterns.py +9 -7
  113. desdeo/tools/pyomo_solver_interfaces.py +48 -35
  114. desdeo/tools/reference_vectors.py +118 -351
  115. desdeo/tools/scalarization.py +340 -1413
  116. desdeo/tools/score_bands.py +491 -328
  117. desdeo/tools/utils.py +117 -49
  118. desdeo/tools/visualizations.py +67 -0
  119. desdeo/utopia_stuff/utopia_problem.py +1 -1
  120. desdeo/utopia_stuff/utopia_problem_old.py +1 -1
  121. {desdeo-2.0.0.dist-info → desdeo-2.1.0.dist-info}/METADATA +46 -28
  122. desdeo-2.1.0.dist-info/RECORD +180 -0
  123. {desdeo-2.0.0.dist-info → desdeo-2.1.0.dist-info}/WHEEL +1 -1
  124. desdeo-2.0.0.dist-info/RECORD +0 -120
  125. /desdeo/api/utils/{logger.py → _logger.py} +0 -0
  126. {desdeo-2.0.0.dist-info → desdeo-2.1.0.dist-info/licenses}/LICENSE +0 -0
@@ -1,33 +1,37 @@
1
1
  """The base class for selection operators.
2
2
 
3
- This whole file should be rewritten. Everything is a mess. Moreover, the selectors do not yet take seeds as input for reproducibility.
3
+ Some operators should be rewritten.
4
4
  TODO:@light-weaver
5
5
  """
6
6
 
7
7
  import warnings
8
8
  from abc import abstractmethod
9
9
  from collections.abc import Sequence
10
- from enum import Enum
10
+ from enum import StrEnum
11
11
  from itertools import combinations
12
- from typing import Literal, TypedDict, TypeVar
12
+ from typing import Callable, Literal, TypeVar
13
13
 
14
14
  import numpy as np
15
15
  import polars as pl
16
+ from numba import njit
17
+ from pydantic import BaseModel, ConfigDict, Field
16
18
  from scipy.special import comb
17
19
  from scipy.stats.qmc import LatinHypercube
18
20
 
19
21
  from desdeo.problem import Problem
20
22
  from desdeo.tools import get_corrected_ideal_and_nadir
23
+ from desdeo.tools.indicators_binary import self_epsilon
21
24
  from desdeo.tools.message import (
22
25
  Array2DMessage,
23
26
  DictMessage,
24
27
  Message,
28
+ NumpyArrayMessage,
25
29
  PolarsDataFrameMessage,
26
30
  SelectorMessageTopics,
27
31
  TerminatorMessageTopics,
28
32
  )
29
33
  from desdeo.tools.non_dominated_sorting import fast_non_dominated_sort
30
- from desdeo.tools.patterns import Subscriber
34
+ from desdeo.tools.patterns import Publisher, Subscriber
31
35
 
32
36
  SolutionType = TypeVar("SolutionType", list, pl.DataFrame)
33
37
 
@@ -35,17 +39,18 @@ SolutionType = TypeVar("SolutionType", list, pl.DataFrame)
35
39
  class BaseSelector(Subscriber):
36
40
  """A base class for selection operators."""
37
41
 
38
- def __init__(self, problem: Problem, **kwargs):
42
+ def __init__(self, problem: Problem, verbosity: int, publisher: Publisher, seed: int = 0):
39
43
  """Initialize a selection operator."""
40
- super().__init__(**kwargs)
44
+ super().__init__(verbosity=verbosity, publisher=publisher)
41
45
  self.problem = problem
42
46
  self.variable_symbols = [x.symbol for x in problem.get_flattened_variables()]
43
47
  self.objective_symbols = [x.symbol for x in problem.objectives]
48
+ self.maximization_mult = {x.symbol: -1 if x.maximize else 1 for x in problem.objectives}
44
49
 
45
50
  if problem.scalarization_funcs is None:
46
51
  self.target_symbols = [f"{x.symbol}_min" for x in problem.objectives]
47
52
  try:
48
- ideal, nadir = get_corrected_ideal_and_nadir(problem)
53
+ ideal, nadir = get_corrected_ideal_and_nadir(problem) # This is for the minimized problem
49
54
  self.ideal = np.array([ideal[x.symbol] for x in problem.objectives])
50
55
  self.nadir = np.array([nadir[x.symbol] for x in problem.objectives]) if nadir is not None else None
51
56
  except ValueError: # in case the ideal and nadir are not provided
@@ -60,6 +65,8 @@ class BaseSelector(Subscriber):
60
65
  else:
61
66
  self.constraints_symbols = [x.symbol for x in problem.constraints]
62
67
  self.num_dims = len(self.target_symbols)
68
+ self.seed = seed
69
+ self.rng = np.random.default_rng(seed)
63
70
 
64
71
  @abstractmethod
65
72
  def do(
@@ -81,109 +88,116 @@ class BaseSelector(Subscriber):
81
88
  """
82
89
 
83
90
 
84
- class ReferenceVectorOptions(TypedDict, total=False):
85
- """The options for the reference vector based selection operators."""
91
+ class ReferenceVectorOptions(BaseModel):
92
+ """Pydantic model for Reference Vector arguments."""
86
93
 
87
- adaptation_frequency: int
88
- """Number of generations between reference vector adaptation. If set to 0, no adaptation occurs. Defaults to 100.
89
- Only used if `interactive_adaptation` is set to "none"."""
90
- creation_type: Literal["simplex", "s_energy"]
94
+ model_config = ConfigDict(use_attribute_docstrings=True)
95
+
96
+ adaptation_frequency: int = Field(default=0)
97
+ """Number of generations between reference vector adaptation. If set to 0, no adaptation occurs. Defaults to 0.
98
+ Only used if no preference is provided."""
99
+ creation_type: Literal["simplex", "s_energy"] = Field(default="simplex")
91
100
  """The method for creating reference vectors. Defaults to "simplex".
92
101
  Currently only "simplex" is implemented. Future versions will include "s_energy".
93
102
 
94
103
  If set to "simplex", the reference vectors are created using the simplex lattice design method.
95
104
  This method generates distributions with specific numbers of reference vectors.
96
105
  Check: https://www.itl.nist.gov/div898/handbook/pri/section5/pri542.htm for more information.
97
-
98
106
  If set to "s_energy", the reference vectors are created using the Riesz s-energy criterion. This method is used to
99
107
  distribute an arbitrary number of reference vectors in the objective space while minimizing the s-energy.
100
108
  Currently not implemented.
101
109
  """
102
- vector_type: Literal["spherical", "planar"]
110
+ vector_type: Literal["spherical", "planar"] = Field(default="spherical")
103
111
  """The method for normalizing the reference vectors. Defaults to "spherical"."""
104
- lattice_resolution: int
112
+ lattice_resolution: int | None = None
105
113
  """Number of divisions along an axis when creating the simplex lattice. This is not required/used for the "s_energy"
106
- method. If not specified, the lattice resolution is calculated based on the `number_of_vectors`.
114
+ method. If not specified, the lattice resolution is calculated based on the `number_of_vectors`. If "spherical" is
115
+ selected as the `vector_type`, this value overrides the `number_of_vectors`.
107
116
  """
108
- number_of_vectors: int
117
+ number_of_vectors: int = 200
109
118
  """Number of reference vectors to be created. If "simplex" is selected as the `creation_type`, then the closest
110
119
  `lattice_resolution` is calculated based on this value. If "s_energy" is selected, then this value is used directly.
111
120
  Note that if neither `lattice_resolution` nor `number_of_vectors` is specified, the number of vectors defaults to
112
- 500.
113
- """
114
- interactive_adaptation: Literal[
115
- "preferred_solutions", "non_preferred_solutions", "preferred_ranges", "reference_point", "none"
116
- ]
117
- """The method for adapting reference vectors based on the Decision maker's preference information.
118
- Defaults to "none".
121
+ 200. Overridden if "spherical" is selected as the `vector_type` and `lattice_resolution` is provided.
119
122
  """
120
- adaptation_distance: float
123
+ adaptation_distance: float = Field(default=0.2)
121
124
  """Distance parameter for the interactive adaptation methods. Defaults to 0.2."""
122
- reference_point: dict[str, float]
125
+ reference_point: dict[str, float] | None = Field(default=None)
123
126
  """The reference point for interactive adaptation."""
124
- preferred_solutions: dict[str, list[float]]
127
+ preferred_solutions: dict[str, list[float]] | None = Field(default=None)
125
128
  """The preferred solutions for interactive adaptation."""
126
- non_preferred_solutions: dict[str, list[float]]
129
+ non_preferred_solutions: dict[str, list[float]] | None = Field(default=None)
127
130
  """The non-preferred solutions for interactive adaptation."""
128
- preferred_ranges: dict[str, list[float]]
131
+ preferred_ranges: dict[str, list[float]] | None = Field(default=None)
129
132
  """The preferred ranges for interactive adaptation."""
130
133
 
131
134
 
132
135
  class BaseDecompositionSelector(BaseSelector):
133
136
  """Base class for decomposition based selection operators."""
134
137
 
135
- def __init__(self, problem: Problem, reference_vector_options: ReferenceVectorOptions, **kwargs):
136
- super().__init__(problem, **kwargs)
138
+ def __init__(
139
+ self,
140
+ problem: Problem,
141
+ reference_vector_options: ReferenceVectorOptions,
142
+ verbosity: int,
143
+ publisher: Publisher,
144
+ invert_reference_vectors: bool = False,
145
+ seed: int = 0,
146
+ ):
147
+ super().__init__(problem, verbosity=verbosity, publisher=publisher, seed=seed)
137
148
  self.reference_vector_options = reference_vector_options
149
+ self.invert_reference_vectors = invert_reference_vectors
138
150
  self.reference_vectors: np.ndarray
139
151
  self.reference_vectors_initial: np.ndarray
140
152
 
141
- # Set default values
142
- if "creation_type" not in self.reference_vector_options:
143
- self.reference_vector_options["creation_type"] = "simplex"
144
- if "vector_type" not in self.reference_vector_options:
145
- self.reference_vector_options["vector_type"] = "spherical"
146
- if "adaptation_frequency" not in self.reference_vector_options:
147
- self.reference_vector_options["adaptation_frequency"] = 100
148
- if self.reference_vector_options["creation_type"] == "simplex":
149
- self._create_simplex()
150
- elif self.reference_vector_options["creation_type"] == "s_energy":
153
+ if self.reference_vector_options.creation_type == "s_energy":
151
154
  raise NotImplementedError("Riesz s-energy criterion is not yet implemented.")
152
155
 
153
- if "interactive_adaptation" not in self.reference_vector_options:
154
- self.reference_vector_options["interactive_adaptation"] = "none"
155
- elif self.reference_vector_options["interactive_adaptation"] != "none":
156
- self.reference_vector_options["adaptation_frequency"] = 0
157
- if "adaptation_distance" not in self.reference_vector_options:
158
- self.reference_vector_options["adaptation_distance"] = 0.2
159
156
  self._create_simplex()
160
157
 
161
- if self.reference_vector_options["interactive_adaptation"] == "reference_point":
162
- if "reference_point" not in self.reference_vector_options:
163
- raise ValueError("Reference point must be specified for interactive adaptation.")
158
+ if self.reference_vector_options.reference_point:
159
+ corrected_rp = np.array(
160
+ [
161
+ self.reference_vector_options.reference_point[x] * self.maximization_mult[x]
162
+ for x in self.objective_symbols
163
+ ]
164
+ )
164
165
  self.interactive_adapt_3(
165
- np.array([self.reference_vector_options["reference_point"][x] for x in self.target_symbols]),
166
- translation_param=self.reference_vector_options["adaptation_distance"],
166
+ corrected_rp,
167
+ translation_param=self.reference_vector_options.adaptation_distance,
167
168
  )
168
- elif self.reference_vector_options["interactive_adaptation"] == "preferred_solutions":
169
- if "preferred_solutions" not in self.reference_vector_options:
170
- raise ValueError("Preferred solutions must be specified for interactive adaptation.")
169
+ elif self.reference_vector_options.preferred_solutions:
170
+ corrected_sols = np.array(
171
+ [
172
+ np.array(self.reference_vector_options.preferred_solutions[x]) * self.maximization_mult[x]
173
+ for x in self.objective_symbols
174
+ ]
175
+ ).T
171
176
  self.interactive_adapt_1(
172
- np.array([self.reference_vector_options["preferred_solutions"][x] for x in self.target_symbols]).T,
173
- translation_param=self.reference_vector_options["adaptation_distance"],
177
+ corrected_sols,
178
+ translation_param=self.reference_vector_options.adaptation_distance,
174
179
  )
175
- elif self.reference_vector_options["interactive_adaptation"] == "non_preferred_solutions":
176
- if "non_preferred_solutions" not in self.reference_vector_options:
177
- raise ValueError("Non-preferred solutions must be specified for interactive adaptation.")
180
+ elif self.reference_vector_options.non_preferred_solutions:
181
+ corrected_sols = np.array(
182
+ [
183
+ np.array(self.reference_vector_options.non_preferred_solutions[x]) * self.maximization_mult[x]
184
+ for x in self.objective_symbols
185
+ ]
186
+ ).T
178
187
  self.interactive_adapt_2(
179
- np.array([self.reference_vector_options["non_preferred_solutions"][x] for x in self.target_symbols]).T,
180
- predefined_distance=self.reference_vector_options["adaptation_distance"],
188
+ corrected_sols,
189
+ predefined_distance=self.reference_vector_options.adaptation_distance,
190
+ ord=2 if self.reference_vector_options.vector_type == "spherical" else 1,
181
191
  )
182
- elif self.reference_vector_options["interactive_adaptation"] == "preferred_ranges":
183
- if "preferred_ranges" not in self.reference_vector_options:
184
- raise ValueError("Preferred ranges must be specified for interactive adaptation.")
192
+ elif self.reference_vector_options.preferred_ranges:
193
+ corrected_ranges = np.array(
194
+ [
195
+ np.array(self.reference_vector_options.preferred_ranges[x]) * self.maximization_mult[x]
196
+ for x in self.objective_symbols
197
+ ]
198
+ ).T
185
199
  self.interactive_adapt_4(
186
- np.array([self.reference_vector_options["preferred_ranges"][x] for x in self.target_symbols]).T,
200
+ corrected_ranges,
187
201
  )
188
202
 
189
203
  def _create_simplex(self):
@@ -203,14 +217,12 @@ class BaseDecompositionSelector(BaseSelector):
203
217
  break
204
218
  return temp_lattice_resolution - 1
205
219
 
206
- if "lattice_resolution" in self.reference_vector_options:
207
- lattice_resolution = self.reference_vector_options["lattice_resolution"]
208
- elif "number_of_vectors" in self.reference_vector_options:
220
+ if self.reference_vector_options.lattice_resolution:
221
+ lattice_resolution = self.reference_vector_options.lattice_resolution
222
+ else:
209
223
  lattice_resolution = approx_lattice_resolution(
210
- self.reference_vector_options["number_of_vectors"], num_dims=self.num_dims
224
+ self.reference_vector_options.number_of_vectors, num_dims=self.num_dims
211
225
  )
212
- else:
213
- lattice_resolution = approx_lattice_resolution(500, num_dims=self.num_dims)
214
226
 
215
227
  number_of_vectors: int = comb(
216
228
  lattice_resolution + self.num_dims - 1,
@@ -218,8 +230,8 @@ class BaseDecompositionSelector(BaseSelector):
218
230
  exact=True,
219
231
  )
220
232
 
221
- self.reference_vector_options["number_of_vectors"] = number_of_vectors
222
- self.reference_vector_options["lattice_resolution"] = lattice_resolution
233
+ self.reference_vector_options.number_of_vectors = number_of_vectors
234
+ self.reference_vector_options.lattice_resolution = lattice_resolution
223
235
 
224
236
  temp1 = range(1, self.num_dims + lattice_resolution)
225
237
  temp1 = np.array(list(combinations(temp1, self.num_dims - 1)))
@@ -230,20 +242,31 @@ class BaseDecompositionSelector(BaseSelector):
230
242
  for i in range(1, self.num_dims - 1):
231
243
  weight[:, i] = temp[:, i] - temp[:, i - 1]
232
244
  weight[:, -1] = lattice_resolution - temp[:, -1]
233
- self.reference_vectors = weight / lattice_resolution
245
+ if not self.invert_reference_vectors: # todo, this currently only exists for nsga3
246
+ self.reference_vectors = weight / lattice_resolution
247
+ else:
248
+ self.reference_vectors = 1 - (weight / lattice_resolution)
234
249
  self.reference_vectors_initial = np.copy(self.reference_vectors)
235
250
  self._normalize_rvs()
236
251
 
237
252
  def _normalize_rvs(self):
238
253
  """Normalize the reference vectors to a unit hypersphere."""
239
- if self.reference_vector_options["vector_type"] == "spherical":
254
+ if self.reference_vector_options.vector_type == "spherical":
240
255
  norm = np.linalg.norm(self.reference_vectors, axis=1).reshape(-1, 1)
241
256
  norm[norm == 0] = np.finfo(float).eps
242
- elif self.reference_vector_options["vector_type"] == "planar":
243
- norm = np.sum(self.reference_vectors, axis=1).reshape(-1, 1)
244
- else:
245
- raise ValueError("Invalid vector type. Must be either 'spherical' or 'planar'.")
246
- self.reference_vectors = np.divide(self.reference_vectors, norm)
257
+ self.reference_vectors = np.divide(self.reference_vectors, norm)
258
+ return
259
+ if self.reference_vector_options.vector_type == "planar":
260
+ if not self.invert_reference_vectors:
261
+ norm = np.sum(self.reference_vectors, axis=1).reshape(-1, 1)
262
+ self.reference_vectors = np.divide(self.reference_vectors, norm)
263
+ return
264
+ else:
265
+ norm = np.sum(1 - self.reference_vectors, axis=1).reshape(-1, 1)
266
+ self.reference_vectors = 1 - np.divide(1 - self.reference_vectors, norm)
267
+ return
268
+ # Not needed due to pydantic validation
269
+ raise ValueError("Invalid vector type. Must be either 'spherical' or 'planar'.")
247
270
 
248
271
  def interactive_adapt_1(self, z: np.ndarray, translation_param: float) -> None:
249
272
  """Adapt reference vectors using the information about preferred solution(s) selected by the Decision maker.
@@ -269,7 +292,7 @@ class BaseDecompositionSelector(BaseSelector):
269
292
  self._normalize_rvs()
270
293
  self.add_edge_vectors()
271
294
 
272
- def interactive_adapt_2(self, z: np.ndarray, predefined_distance: float) -> None:
295
+ def interactive_adapt_2(self, z: np.ndarray, predefined_distance: float, ord: int) -> None:
273
296
  """Adapt reference vectors by using the information about non-preferred solution(s) selected by the Decision maker.
274
297
 
275
298
  After the Decision maker has specified non-preferred solution(s), Euclidean distance between normalized solution
@@ -291,12 +314,12 @@ class BaseDecompositionSelector(BaseSelector):
291
314
  Args:
292
315
  z (np.ndarray): Non-preferred solution(s).
293
316
  predefined_distance (float): The reference vectors that are closer than this distance are either removed or
294
- re-positioned somewhere else.
295
- Default value: 0.2
317
+ re-positioned somewhere else. Default value: 0.2
318
+ ord (int): Order of the norm. Default is 2, i.e., Euclidean distance.
296
319
  """
297
320
  # calculate the norm (of order `ord`) of the non-preferred solution(s)
298
321
  z = np.atleast_2d(z)
299
- norm = np.linalg.norm(z, ord=2, axis=1).reshape(np.shape(z)[0], 1)
322
+ norm = np.linalg.norm(z, ord=ord, axis=1).reshape(np.shape(z)[0], 1)
300
323
 
301
324
  # non-preferred solutions normalized
302
325
  v_c = np.divide(z, norm)
@@ -361,7 +384,7 @@ class BaseDecompositionSelector(BaseSelector):
361
384
  upper_limits = np.max(preferred_ranges, axis=0)
362
385
 
363
386
  # generate samples using Latin hypercube sampling
364
- lhs = LatinHypercube(d=self.num_dims)
387
+ lhs = LatinHypercube(d=self.num_dims, seed=self.rng)
365
388
  w = lhs.random(n=self.reference_vectors_initial.shape[0])
366
389
 
367
390
  # scale between bounds
@@ -384,12 +407,135 @@ class BaseDecompositionSelector(BaseSelector):
384
407
  self._normalize_rvs()
385
408
 
386
409
 
387
- class ParameterAdaptationStrategy(Enum):
410
+ class ParameterAdaptationStrategy(StrEnum):
388
411
  """The parameter adaptation strategies for the RVEA selector."""
389
412
 
390
- GENERATION_BASED = 1 # Based on the current generation and the maximum generation.
391
- FUNCTION_EVALUATION_BASED = 2 # Based on the current function evaluation and the maximum function evaluation.
392
- OTHER = 3 # As of yet undefined strategies.
413
+ GENERATION_BASED = "GENERATION_BASED" # Based on the current generation and the maximum generation.
414
+ FUNCTION_EVALUATION_BASED = (
415
+ "FUNCTION_EVALUATION_BASED" # Based on the current function evaluation and the maximum function evaluation.
416
+ )
417
+ OTHER = "OTHER" # As of yet undefined strategies.
418
+
419
+
420
+ @njit
421
+ def _rvea_selection(
422
+ fitness: np.ndarray, reference_vectors: np.ndarray, ideal: np.ndarray, partial_penalty: float, gamma: np.ndarray
423
+ ) -> tuple[np.ndarray, np.ndarray]:
424
+ """Select individuals based on their fitness and their distance to the reference vectors.
425
+
426
+ Args:
427
+ fitness (np.ndarray): The fitness values of the individuals.
428
+ reference_vectors (np.ndarray): The reference vectors.
429
+ ideal (np.ndarray): The ideal point.
430
+ partial_penalty (float): The partial penalty in APD.
431
+ gamma (np.ndarray): The angle between current and closest reference vector.
432
+
433
+ Returns:
434
+ tuple[np.ndarray, np.ndarray]: The selected individuals and their APD fitness values.
435
+ """
436
+ tranlated_fitness = fitness - ideal
437
+ num_vectors = reference_vectors.shape[0]
438
+ num_solutions = fitness.shape[0]
439
+
440
+ cos_matrix = np.zeros((num_solutions, num_vectors))
441
+
442
+ for i in range(num_solutions):
443
+ solution = tranlated_fitness[i]
444
+ norm = np.linalg.norm(solution)
445
+ for j in range(num_vectors):
446
+ cos_matrix[i, j] = np.dot(solution, reference_vectors[j]) / max(1e-10, norm) # Avoid division by zero
447
+
448
+ assignment_matrix = np.zeros((num_solutions, num_vectors), dtype=np.bool_)
449
+
450
+ for i in range(num_solutions):
451
+ assignment_matrix[i, np.argmax(cos_matrix[i])] = True
452
+
453
+ selection = np.zeros(num_solutions, dtype=np.bool_)
454
+ apd_fitness = np.zeros(num_solutions, dtype=np.float64)
455
+
456
+ for j in range(num_vectors):
457
+ min_apd = np.inf
458
+ select = -1
459
+ for i in np.where(assignment_matrix[:, j])[0]:
460
+ solution = tranlated_fitness[i]
461
+ apd = (1 + (partial_penalty * np.arccos(cos_matrix[i, j]) / gamma[j])) * np.linalg.norm(solution)
462
+ apd_fitness[i] = apd
463
+ if apd < min_apd:
464
+ min_apd = apd
465
+ select = i
466
+ selection[select] = True
467
+
468
+ return selection, apd_fitness
469
+
470
+
471
+ @njit
472
+ def _rvea_selection_constrained(
473
+ fitness: np.ndarray,
474
+ constraints: np.ndarray,
475
+ reference_vectors: np.ndarray,
476
+ ideal: np.ndarray,
477
+ partial_penalty: float,
478
+ gamma: np.ndarray,
479
+ ) -> tuple[np.ndarray, np.ndarray]:
480
+ """Select individuals based on their fitness and their distance to the reference vectors.
481
+
482
+ Args:
483
+ fitness (np.ndarray): The fitness values of the individuals.
484
+ constraints (np.ndarray): The constraint violations of the individuals.
485
+ reference_vectors (np.ndarray): The reference vectors.
486
+ ideal (np.ndarray): The ideal point.
487
+ partial_penalty (float): The partial penalty in APD.
488
+ gamma (np.ndarray): The angle between current and closest reference vector.
489
+
490
+ Returns:
491
+ tuple[np.ndarray, np.ndarray]: The selected individuals and their APD fitness values.
492
+ """
493
+ tranlated_fitness = fitness - ideal
494
+ num_vectors = reference_vectors.shape[0]
495
+ num_solutions = fitness.shape[0]
496
+
497
+ violations = np.maximum(0, constraints)
498
+
499
+ cos_matrix = np.zeros((num_solutions, num_vectors))
500
+
501
+ for i in range(num_solutions):
502
+ solution = tranlated_fitness[i]
503
+ norm = np.linalg.norm(solution)
504
+ for j in range(num_vectors):
505
+ cos_matrix[i, j] = np.dot(solution, reference_vectors[j]) / max(1e-10, norm) # Avoid division by zero
506
+
507
+ assignment_matrix = np.zeros((num_solutions, num_vectors), dtype=np.bool_)
508
+
509
+ for i in range(num_solutions):
510
+ assignment_matrix[i, np.argmax(cos_matrix[i])] = True
511
+
512
+ selection = np.zeros(num_solutions, dtype=np.bool_)
513
+ apd_fitness = np.zeros(num_solutions, dtype=np.float64)
514
+
515
+ for j in range(num_vectors):
516
+ min_apd = np.inf
517
+ min_violation = np.inf
518
+ select = -1
519
+ select_violation = -1
520
+ for i in np.where(assignment_matrix[:, j])[0]:
521
+ solution = tranlated_fitness[i]
522
+ apd = (1 + (partial_penalty * np.arccos(cos_matrix[i, j]) / gamma[j])) * np.linalg.norm(solution)
523
+ apd_fitness[i] = apd
524
+ feasible = np.all(violations[i] == 0)
525
+ current_violation = np.sum(violations[i])
526
+ if feasible:
527
+ if apd < min_apd:
528
+ min_apd = apd
529
+ select = i
530
+ elif current_violation < min_violation:
531
+ min_violation = current_violation
532
+ select_violation = i
533
+ if select != -1:
534
+ selection[select] = True
535
+ else:
536
+ selection[select_violation] = True
537
+
538
+ return selection, apd_fitness
393
539
 
394
540
 
395
541
  class RVEASelector(BaseDecompositionSelector):
@@ -419,25 +565,42 @@ class RVEASelector(BaseDecompositionSelector):
419
565
  def __init__(
420
566
  self,
421
567
  problem: Problem,
568
+ verbosity: int,
569
+ publisher: Publisher,
422
570
  alpha: float = 2.0,
423
571
  parameter_adaptation_strategy: ParameterAdaptationStrategy = ParameterAdaptationStrategy.GENERATION_BASED,
424
- reference_vector_options: ReferenceVectorOptions | None = None,
425
- **kwargs,
572
+ reference_vector_options: ReferenceVectorOptions | dict | None = None,
573
+ seed: int = 0,
426
574
  ):
427
- if not isinstance(parameter_adaptation_strategy, ParameterAdaptationStrategy):
575
+ if parameter_adaptation_strategy not in ParameterAdaptationStrategy:
428
576
  raise TypeError(f"Parameter adaptation strategy must be of Type {type(ParameterAdaptationStrategy)}")
429
577
  if parameter_adaptation_strategy == ParameterAdaptationStrategy.OTHER:
430
578
  raise ValueError("Other parameter adaptation strategies are not yet implemented.")
431
579
 
432
580
  if reference_vector_options is None:
433
- reference_vector_options: ReferenceVectorOptions = ReferenceVectorOptions(
434
- adaptation_frequency=100,
435
- creation_type="simplex",
436
- vector_type="spherical",
437
- number_of_vectors=500,
581
+ reference_vector_options = ReferenceVectorOptions()
582
+
583
+ if isinstance(reference_vector_options, dict):
584
+ reference_vector_options = ReferenceVectorOptions.model_validate(reference_vector_options)
585
+
586
+ # Just asserting correct options for RVEA
587
+ reference_vector_options.vector_type = "spherical"
588
+ if reference_vector_options.adaptation_frequency == 0:
589
+ warnings.warn(
590
+ "Adaptation frequency was set to 0. Setting it to 100 for RVEA selector. "
591
+ "Set it to 0 only if you provide preference information.",
592
+ UserWarning,
593
+ stacklevel=2,
438
594
  )
439
-
440
- super().__init__(problem=problem, reference_vector_options=reference_vector_options, **kwargs)
595
+ reference_vector_options.adaptation_frequency = 100
596
+
597
+ super().__init__(
598
+ problem=problem,
599
+ reference_vector_options=reference_vector_options,
600
+ verbosity=verbosity,
601
+ publisher=publisher,
602
+ seed=seed,
603
+ )
441
604
 
442
605
  self.reference_vectors_gamma: np.ndarray
443
606
  self.numerator: float | None = None
@@ -448,6 +611,7 @@ class RVEASelector(BaseDecompositionSelector):
448
611
  self.selection: list[int]
449
612
  self.penalty = None
450
613
  self.parameter_adaptation_strategy = parameter_adaptation_strategy
614
+ self.adapted_reference_vectors = None
451
615
 
452
616
  def do(
453
617
  self,
@@ -472,97 +636,59 @@ class RVEASelector(BaseDecompositionSelector):
472
636
  solutions = parents[0] + offsprings[0]
473
637
  else:
474
638
  raise TypeError("The decision variables must be either a list or a polars DataFrame, not both")
639
+ if len(parents[0]) == 0:
640
+ raise RuntimeError(
641
+ "The parents population is empty. Cannot perform selection. This is a known unresolved issue."
642
+ )
475
643
  alltargets = parents[1].vstack(offsprings[1])
476
644
  targets = alltargets[self.target_symbols].to_numpy()
477
645
  if self.constraints_symbols is None or len(self.constraints_symbols) == 0:
478
- constraints = None
646
+ # No constraints :)
647
+ if self.ideal is None:
648
+ self.ideal = np.min(targets, axis=0)
649
+ else:
650
+ self.ideal = np.min(np.vstack((self.ideal, np.min(targets, axis=0))), axis=0)
651
+ self.nadir = np.max(targets, axis=0) if self.nadir is None else self.nadir
652
+ if self.adapted_reference_vectors is None:
653
+ self._adapt()
654
+ selection, _ = _rvea_selection(
655
+ fitness=targets,
656
+ reference_vectors=self.adapted_reference_vectors,
657
+ ideal=self.ideal,
658
+ partial_penalty=self._partial_penalty_factor(),
659
+ gamma=self.reference_vectors_gamma,
660
+ )
479
661
  else:
662
+ # Yes constraints :(
480
663
  constraints = (
481
664
  parents[1][self.constraints_symbols].vstack(offsprings[1][self.constraints_symbols]).to_numpy()
482
665
  )
666
+ feasible = (constraints <= 0).all(axis=1)
667
+ # Note that
668
+ if self.ideal is None:
669
+ # TODO: This breaks if there are no feasible solutions in the initial population
670
+ self.ideal = np.min(targets[feasible], axis=0)
671
+ else:
672
+ self.ideal = np.min(np.vstack((self.ideal, np.min(targets[feasible], axis=0))), axis=0)
673
+ try:
674
+ nadir = np.max(targets[feasible], axis=0)
675
+ self.nadir = nadir
676
+ except ValueError: # No feasible solution in current population
677
+ pass # Use previous nadir
678
+ if self.adapted_reference_vectors is None:
679
+ self._adapt()
680
+ selection, _ = _rvea_selection_constrained(
681
+ fitness=targets,
682
+ constraints=constraints,
683
+ reference_vectors=self.adapted_reference_vectors,
684
+ ideal=self.ideal,
685
+ partial_penalty=self._partial_penalty_factor(),
686
+ gamma=self.reference_vectors_gamma,
687
+ )
483
688
 
484
- if self.ideal is None:
485
- self.ideal = np.min(targets, axis=0)
486
- else:
487
- self.ideal = np.min(np.vstack((self.ideal, np.min(targets, axis=0))), axis=0)
488
- partial_penalty_factor = self._partial_penalty_factor()
489
- self._adapt()
490
-
491
- ref_vectors = self.adapted_reference_vectors
492
- # Normalization - There may be problems here
493
- translated_targets = targets - self.ideal
494
- targets_norm = np.linalg.norm(translated_targets, axis=1)
495
- # TODO check if you need the next line
496
- # TODO changing the order of the following few operations might be efficient
497
- targets_norm = np.repeat(targets_norm, len(translated_targets[0, :])).reshape(translated_targets.shape)
498
- # Convert zeros to eps to avoid divide by zero.
499
- # Has to be checked!
500
- targets_norm[targets_norm == 0] = np.finfo(float).eps
501
- normalized_targets = np.divide(translated_targets, targets_norm) # Checked, works.
502
- cosine = np.dot(normalized_targets, np.transpose(ref_vectors))
503
- if cosine[np.where(cosine > 1)].size:
504
- cosine[np.where(cosine > 1)] = 1
505
- if cosine[np.where(cosine < 0)].size:
506
- cosine[np.where(cosine < 0)] = 0
507
- # Calculation of angles between reference vectors and solutions
508
- theta = np.arccos(cosine)
509
- # Reference vector assignment
510
- assigned_vectors = np.argmax(cosine, axis=1)
511
- selection = np.array([], dtype=int)
512
- # Selection
513
- # Convert zeros to eps to avoid divide by zero.
514
- # Has to be checked!
515
- ref_vectors[ref_vectors == 0] = np.finfo(float).eps
516
- for i in range(len(ref_vectors)):
517
- sub_population_index = np.atleast_1d(np.squeeze(np.where(assigned_vectors == i)))
518
-
519
- # Constraint check
520
- if len(sub_population_index) > 1 and constraints is not None:
521
- violation_values = constraints[sub_population_index]
522
- # violation_values = -violation_values
523
- violation_values = np.maximum(0, violation_values)
524
- # True if feasible
525
- feasible_bool = (violation_values == 0).all(axis=1)
526
-
527
- # Case when entire subpopulation is infeasible
528
- if not feasible_bool.any():
529
- violation_values = violation_values.sum(axis=1)
530
- sub_population_index = sub_population_index[np.where(violation_values == violation_values.min())]
531
- # Case when only some are infeasible
532
- else:
533
- sub_population_index = sub_population_index[feasible_bool]
534
-
535
- sub_population_fitness = translated_targets[sub_population_index]
536
- # fast tracking singly selected individuals
537
- if len(sub_population_index) == 1:
538
- selx = sub_population_index
539
- if selection.shape[0] == 0:
540
- selection = np.hstack((selection, np.transpose(selx[0])))
541
- else:
542
- selection = np.vstack((selection, np.transpose(selx[0])))
543
- elif len(sub_population_index) > 1:
544
- # APD Calculation
545
- angles = theta[sub_population_index, i]
546
- angles = np.divide(angles, self.reference_vectors_gamma[i]) # This is correct.
547
- # You have done this calculation before. Check with fitness_norm
548
- # Remove this horrible line
549
- sub_pop_fitness_magnitude = np.sqrt(np.sum(np.power(sub_population_fitness, 2), axis=1))
550
- apd = np.multiply(
551
- np.transpose(sub_pop_fitness_magnitude),
552
- (1 + np.dot(partial_penalty_factor, angles)),
553
- )
554
- minidx = np.where(apd == np.nanmin(apd))
555
- if np.isnan(apd).all():
556
- continue
557
- selx = sub_population_index[minidx]
558
- if selection.shape[0] == 0:
559
- selection = np.hstack((selection, np.transpose(selx[0])))
560
- else:
561
- selection = np.vstack((selection, np.transpose(selx[0])))
562
-
563
- self.selection = selection.tolist()
564
- self.selected_individuals = solutions[selection.flatten()]
565
- self.selected_targets = alltargets[selection.flatten()]
689
+ self.selection = np.where(selection)[0].tolist()
690
+ self.selected_individuals = solutions[self.selection]
691
+ self.selected_targets = alltargets[self.selection]
566
692
  self.notify()
567
693
  return self.selected_individuals, self.selected_targets
568
694
 
@@ -596,6 +722,11 @@ class RVEASelector(BaseDecompositionSelector):
596
722
  if self.parameter_adaptation_strategy == ParameterAdaptationStrategy.GENERATION_BASED:
597
723
  if message.topic == TerminatorMessageTopics.GENERATION:
598
724
  self.numerator = message.value
725
+ if (
726
+ self.reference_vector_options.adaptation_frequency > 0
727
+ and self.numerator % self.reference_vector_options.adaptation_frequency == 0
728
+ ):
729
+ self._adapt()
599
730
  if message.topic == TerminatorMessageTopics.MAX_GENERATIONS:
600
731
  self.denominator = message.value
601
732
  elif self.parameter_adaptation_strategy == ParameterAdaptationStrategy.FUNCTION_EVALUATION_BASED:
@@ -671,25 +802,51 @@ class RVEASelector(BaseDecompositionSelector):
671
802
  self.adapted_reference_vectors / np.linalg.norm(self.adapted_reference_vectors, axis=1)[:, None]
672
803
  )
673
804
 
674
- # More efficient way to calculate the gamma values
675
- self.reference_vectors_gamma = np.arccos(
676
- np.dot(self.adapted_reference_vectors, np.transpose(self.adapted_reference_vectors))
677
- )
678
- self.reference_vectors_gamma[np.where(self.reference_vectors_gamma == 0)] = np.inf
679
- self.reference_vectors_gamma = np.min(self.reference_vectors_gamma, axis=1)
680
-
681
-
682
- class NSGAIII_select(BaseDecompositionSelector):
683
- """The NSGA-III selection operator. Code is heavily based on the version of nsga3 in the pymoo package by msu-coinlab.
805
+ self.reference_vectors_gamma = np.zeros(self.adapted_reference_vectors.shape[0])
806
+ for i in range(self.adapted_reference_vectors.shape[0]):
807
+ closest_angle = np.inf
808
+ for j in range(self.adapted_reference_vectors.shape[0]):
809
+ if i != j:
810
+ angle = np.arccos(
811
+ np.clip(np.dot(self.adapted_reference_vectors[i], self.adapted_reference_vectors[j]), -1.0, 1.0)
812
+ )
813
+ if angle < closest_angle and angle > 0:
814
+ # In cases with extreme differences in obj func ranges
815
+ # sometimes, the closest reference vectors are so close that
816
+ # the angle between them is 0 according to arccos (literally 0)
817
+ closest_angle = angle
818
+ self.reference_vectors_gamma[i] = closest_angle
819
+
820
+
821
+ @njit
822
+ def jitted_calc_perpendicular_distance(
823
+ solutions: np.ndarray, ref_dirs: np.ndarray, invert_reference_vectors: bool
824
+ ) -> np.ndarray:
825
+ """Calculate the perpendicular distance between solutions and reference directions.
826
+
827
+ Args:
828
+ solutions (np.ndarray): The normalized solutions.
829
+ ref_dirs (np.ndarray): The reference directions.
830
+ invert_reference_vectors (bool): Whether to invert the reference vectors.
831
+
832
+ Returns:
833
+ np.ndarray: The perpendicular distance matrix.
834
+ """
835
+ matrix = np.zeros((solutions.shape[0], ref_dirs.shape[0]))
836
+ for i in range(ref_dirs.shape[0]):
837
+ for j in range(solutions.shape[0]):
838
+ if invert_reference_vectors:
839
+ unit_vector = 1 - ref_dirs[i]
840
+ unit_vector = -unit_vector / np.linalg.norm(unit_vector)
841
+ else:
842
+ unit_vector = ref_dirs[i] / np.linalg.norm(ref_dirs[i])
843
+ component = ref_dirs[i] - solutions[j] - np.dot(ref_dirs[i] - solutions[j], unit_vector) * unit_vector
844
+ matrix[j, i] = np.linalg.norm(component)
845
+ return matrix
684
846
 
685
- Parameters
686
- ----------
687
- pop : Population
688
- [description]
689
- n_survive : int, optional
690
- [description], by default None
691
847
 
692
- """
848
+ class NSGA3Selector(BaseDecompositionSelector):
849
+ """The NSGA-III selection operator, heavily based on the version of nsga3 in the pymoo package by msu-coinlab."""
693
850
 
694
851
  @property
695
852
  def provided_topics(self):
@@ -712,17 +869,40 @@ class NSGAIII_select(BaseDecompositionSelector):
712
869
  def __init__(
713
870
  self,
714
871
  problem: Problem,
872
+ verbosity: int,
873
+ publisher: Publisher,
715
874
  reference_vector_options: ReferenceVectorOptions | None = None,
716
- **kwargs,
875
+ invert_reference_vectors: bool = False,
876
+ seed: int = 0,
717
877
  ):
878
+ """Initialize the NSGA-III selection operator.
879
+
880
+ Args:
881
+ problem (Problem): The optimization problem to be solved.
882
+ verbosity (int): The verbosity level of the operator.
883
+ publisher (Publisher): The publisher to use for communication.
884
+ reference_vector_options (ReferenceVectorOptions | None, optional): Options for the reference vectors. Defaults to None.
885
+ invert_reference_vectors (bool, optional): Whether to invert the reference vectors. Defaults to False.
886
+ seed (int, optional): The random seed to use. Defaults to 0.
887
+ """
718
888
  if reference_vector_options is None:
719
- reference_vector_options: ReferenceVectorOptions = ReferenceVectorOptions(
720
- adaptation_frequency=0,
721
- creation_type="simplex",
722
- vector_type="planar",
723
- number_of_vectors=500,
724
- )
725
- super().__init__(problem, reference_vector_options=reference_vector_options, **kwargs)
889
+ reference_vector_options = ReferenceVectorOptions()
890
+ elif isinstance(reference_vector_options, dict):
891
+ reference_vector_options = ReferenceVectorOptions.model_validate(reference_vector_options)
892
+
893
+ # Just asserting correct options for NSGA-III
894
+ reference_vector_options.vector_type = "planar"
895
+ super().__init__(
896
+ problem,
897
+ reference_vector_options=reference_vector_options,
898
+ verbosity=verbosity,
899
+ publisher=publisher,
900
+ seed=seed,
901
+ invert_reference_vectors=invert_reference_vectors,
902
+ )
903
+ if self.constraints_symbols is not None:
904
+ raise NotImplementedError("NSGA3 selector does not support constraints. Please use a different selector.")
905
+
726
906
  self.adapted_reference_vectors = None
727
907
  self.worst_fitness: np.ndarray | None = None
728
908
  self.extreme_points: np.ndarray | None = None
@@ -917,13 +1097,13 @@ class NSGAIII_select(BaseDecompositionSelector):
917
1097
  next_niche_count = niche_count[next_niches_list]
918
1098
  next_niche = np.where(next_niche_count == next_niche_count.min())[0]
919
1099
  next_niche = next_niches_list[next_niche]
920
- next_niche = next_niche[np.random.randint(0, len(next_niche))]
1100
+ next_niche = next_niche[self.rng.integers(0, len(next_niche))]
921
1101
 
922
1102
  # indices of individuals that are considered and assign to next_niche
923
1103
  next_ind = np.where(np.logical_and(niche_of_individuals == next_niche, mask))[0]
924
1104
 
925
1105
  # shuffle to break random tie (equal perp. dist) or select randomly
926
- np.random.shuffle(next_ind)
1106
+ self.rng.shuffle(next_ind)
927
1107
 
928
1108
  if niche_count[next_niche] == 0:
929
1109
  next_ind = next_ind[np.argmin(dist_to_niche[next_ind])]
@@ -946,7 +1126,8 @@ class NSGAIII_select(BaseDecompositionSelector):
946
1126
 
947
1127
  # normalize by ideal point and intercepts
948
1128
  N = (F - utopian_point) / denom
949
- dist_matrix = self.calc_perpendicular_distance(N, ref_dirs)
1129
+ # dist_matrix = self.calc_perpendicular_distance(N, ref_dirs)
1130
+ dist_matrix = jitted_calc_perpendicular_distance(N, ref_dirs, self.invert_reference_vectors)
950
1131
 
951
1132
  niche_of_individuals = np.argmin(dist_matrix, axis=1)
952
1133
  dist_to_niche = dist_matrix[np.arange(F.shape[0]), niche_of_individuals]
@@ -960,8 +1141,12 @@ class NSGAIII_select(BaseDecompositionSelector):
960
1141
  return niche_count
961
1142
 
962
1143
  def calc_perpendicular_distance(self, N, ref_dirs):
963
- u = np.tile(ref_dirs, (len(N), 1))
964
- v = np.repeat(N, len(ref_dirs), axis=0)
1144
+ if self.invert_reference_vectors:
1145
+ u = np.tile(-ref_dirs, (len(N), 1))
1146
+ v = np.repeat(1 - N, len(ref_dirs), axis=0)
1147
+ else:
1148
+ u = np.tile(ref_dirs, (len(N), 1))
1149
+ v = np.repeat(N, len(ref_dirs), axis=0)
965
1150
 
966
1151
  norm_u = np.linalg.norm(u, axis=1)
967
1152
 
@@ -1034,3 +1219,560 @@ class NSGAIII_select(BaseDecompositionSelector):
1034
1219
 
1035
1220
  def update(self, message: Message) -> None:
1036
1221
  pass
1222
+
1223
+
1224
+ @njit
1225
+ def _ibea_fitness(fitness_components: np.ndarray, kappa: float) -> np.ndarray:
1226
+ """Calculates the IBEA fitness for each individual based on pairwise fitness components.
1227
+
1228
+ Args:
1229
+ fitness_components (np.ndarray): The pairwise fitness components of the individuals.
1230
+ kappa (float): The kappa value for the IBEA selection.
1231
+
1232
+ Returns:
1233
+ np.ndarray: The IBEA fitness values for each individual.
1234
+ """
1235
+ num_individuals = fitness_components.shape[0]
1236
+ fitness = np.zeros(num_individuals)
1237
+ for i in range(num_individuals):
1238
+ for j in range(num_individuals):
1239
+ if i != j:
1240
+ fitness[i] -= np.exp(-fitness_components[j, i] / kappa)
1241
+ return fitness
1242
+
1243
+
1244
+ @njit
1245
+ def _ibea_select(fitness_components: np.ndarray, bad_sols: np.ndarray, kappa: float) -> int:
1246
+ """Selects the worst individual based on the IBEA indicator.
1247
+
1248
+ Args:
1249
+ fitness_components (np.ndarray): The pairwise fitness components of the individuals.
1250
+ bad_sols (np.ndarray): A boolean array indicating which individuals are considered "bad".
1251
+ kappa (float): The kappa value for the IBEA selection.
1252
+
1253
+ Returns:
1254
+ int: The index of the selected individual.
1255
+ """
1256
+ fitness = np.zeros(len(fitness_components))
1257
+ for i in range(len(fitness_components)):
1258
+ if bad_sols[i]:
1259
+ continue
1260
+ for j in range(len(fitness_components)):
1261
+ if bad_sols[j] or i == j:
1262
+ continue
1263
+ fitness[i] -= np.exp(-fitness_components[j, i] / kappa)
1264
+ choice = np.argmin(fitness)
1265
+ if fitness[choice] >= 0:
1266
+ if sum(bad_sols) == len(fitness_components) - 1:
1267
+ # If all but one individual is chosen, select the last one
1268
+ return np.where(~bad_sols)[0][0]
1269
+ raise RuntimeError("All individuals have non-negative fitness. Cannot select a new individual.")
1270
+ return choice
1271
+
1272
+
1273
+ @njit
1274
+ def _ibea_select_all(fitness_components: np.ndarray, population_size: int, kappa: float) -> np.ndarray:
1275
+ """Selects all individuals based on the IBEA indicator.
1276
+
1277
+ Args:
1278
+ fitness_components (np.ndarray): The pairwise fitness components of the individuals.
1279
+ population_size (int): The desired size of the population after selection.
1280
+ kappa (float): The kappa value for the IBEA selection.
1281
+
1282
+ Returns:
1283
+ np.ndarray: A boolean mask that is True for the selected individuals.
1284
+ """
1285
+ current_pop_size = len(fitness_components)
1286
+ bad_sols = np.zeros(current_pop_size, dtype=np.bool_)
1287
+ fitness = np.zeros(len(fitness_components))
1288
+ mod_fit_components = np.exp(-fitness_components / kappa)
1289
+ for i in range(len(fitness_components)):
1290
+ for j in range(len(fitness_components)):
1291
+ if i == j:
1292
+ continue
1293
+ fitness[i] -= mod_fit_components[j, i]
1294
+ while current_pop_size - sum(bad_sols) > population_size:
1295
+ selected = np.argmin(fitness)
1296
+ if fitness[selected] >= 0:
1297
+ if sum(bad_sols) == len(fitness_components) - 1:
1298
+ # If all but one individual is chosen, select the last one
1299
+ selected = np.where(~bad_sols)[0][0]
1300
+ raise RuntimeError("All individuals have non-negative fitness. Cannot select a new individual.")
1301
+ fitness[selected] = np.inf # Make sure that this individual is not selected again
1302
+ bad_sols[selected] = True
1303
+ for i in range(len(mod_fit_components)):
1304
+ if bad_sols[i]:
1305
+ continue
1306
+ # Update fitness of the remaining individuals
1307
+ fitness[i] += mod_fit_components[selected, i]
1308
+ return ~bad_sols
1309
+
1310
+
1311
+ class IBEASelector(BaseSelector):
1312
+ """The adaptive IBEA selection operator.
1313
+
1314
+ Reference: Zitzler, E., Künzli, S. (2004). Indicator-Based Selection in Multiobjective Search. In: Yao, X., et al.
1315
+ Parallel Problem Solving from Nature - PPSN VIII. PPSN 2004. Lecture Notes in Computer Science, vol 3242.
1316
+ Springer, Berlin, Heidelberg. https://doi.org/10.1007/978-3-540-30217-9_84
1317
+ """
1318
+
1319
+ @property
1320
+ def provided_topics(self):
1321
+ return {
1322
+ 0: [],
1323
+ 1: [SelectorMessageTopics.STATE],
1324
+ 2: [SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS, SelectorMessageTopics.SELECTED_FITNESS],
1325
+ }
1326
+
1327
+ @property
1328
+ def interested_topics(self):
1329
+ return []
1330
+
1331
+ def __init__(
1332
+ self,
1333
+ problem: Problem,
1334
+ verbosity: int,
1335
+ publisher: Publisher,
1336
+ population_size: int,
1337
+ kappa: float = 0.05,
1338
+ binary_indicator: Callable[[np.ndarray], np.ndarray] = self_epsilon,
1339
+ seed: int = 0,
1340
+ ):
1341
+ """Initialize the IBEA selector.
1342
+
1343
+ Args:
1344
+ problem (Problem): The problem to solve.
1345
+ verbosity (int): The verbosity level of the selector.
1346
+ publisher (Publisher): The publisher to send messages to.
1347
+ population_size (int): The size of the population to select.
1348
+ kappa (float, optional): The kappa value for the IBEA selection. Defaults to 0.05.
1349
+ binary_indicator (Callable[[np.ndarray], np.ndarray], optional): The binary indicator function to use.
1350
+ Defaults to self_epsilon, which uses the binary adaptive epsilon indicator.
1351
+ """
1352
+ # TODO(@light-weaver): IBEA doesn't perform as good as expected
1353
+ # The distribution of solutions found isn't very uniform
1354
+ # Update 21st August, tested against jmetalpy IBEA. Our version is both faster and better
1355
+ # What is happening???
1356
+ # Results are similar to this https://github.com/Xavier-MaYiMing/IBEA/
1357
+ super().__init__(problem=problem, verbosity=verbosity, publisher=publisher, seed=seed)
1358
+ self.selection: list[int] | None = None
1359
+ self.selected_individuals: SolutionType | None = None
1360
+ self.selected_targets: pl.DataFrame | None = None
1361
+ self.binary_indicator = binary_indicator
1362
+ self.kappa = kappa
1363
+ self.population_size = population_size
1364
+ if self.constraints_symbols is not None:
1365
+ raise NotImplementedError("IBEA selector does not support constraints. Please use a different selector.")
1366
+
1367
+ def do(
1368
+ self, parents: tuple[SolutionType, pl.DataFrame], offsprings: tuple[SolutionType, pl.DataFrame]
1369
+ ) -> tuple[SolutionType, pl.DataFrame]:
1370
+ """Perform the selection operation.
1371
+
1372
+ Args:
1373
+ parents (tuple[SolutionType, pl.DataFrame]): the decision variables as the first element.
1374
+ The second element is the objective values, targets, and constraint violations.
1375
+ offsprings (tuple[SolutionType, pl.DataFrame]): the decision variables as the first element.
1376
+ The second element is the objective values, targets, and constraint violations.
1377
+
1378
+ Returns:
1379
+ tuple[SolutionType, pl.DataFrame]: The selected decision variables and their objective values,
1380
+ targets, and constraint violations.
1381
+ """
1382
+ if self.constraints_symbols is not None:
1383
+ raise NotImplementedError("IBEA selector does not support constraints. Please use a different selector.")
1384
+ if isinstance(parents[0], pl.DataFrame) and isinstance(offsprings[0], pl.DataFrame):
1385
+ solutions = parents[0].vstack(offsprings[0])
1386
+ elif isinstance(parents[0], list) and isinstance(offsprings[0], list):
1387
+ solutions = parents[0] + offsprings[0]
1388
+ else:
1389
+ raise TypeError("The decision variables must be either a list or a polars DataFrame, not both")
1390
+ if len(parents[0]) < self.population_size:
1391
+ return parents[0], parents[1]
1392
+ alltargets = parents[1].vstack(offsprings[1])
1393
+
1394
+ # Adaptation
1395
+ target_vals = alltargets[self.target_symbols].to_numpy()
1396
+ target_min = np.min(target_vals, axis=0)
1397
+ target_max = np.max(target_vals, axis=0)
1398
+ # Scale the targets to the range [0, 1]
1399
+ target_vals = (target_vals - target_min) / (target_max - target_min)
1400
+ fitness_components = self.binary_indicator(target_vals)
1401
+ kappa_mult = np.max(np.abs(fitness_components))
1402
+
1403
+ chosen = _ibea_select_all(
1404
+ fitness_components, population_size=self.population_size, kappa=kappa_mult * self.kappa
1405
+ )
1406
+ self.selected_individuals = solutions.filter(chosen)
1407
+ self.selected_targets = alltargets.filter(chosen)
1408
+ self.selection = chosen
1409
+
1410
+ fitness_components = fitness_components[chosen][:, chosen]
1411
+ self.fitness = _ibea_fitness(fitness_components, kappa=self.kappa * np.abs(fitness_components).max())
1412
+
1413
+ self.notify()
1414
+ return self.selected_individuals, self.selected_targets
1415
+
1416
+ def state(self) -> Sequence[Message]:
1417
+ """Return the state of the selector."""
1418
+ if self.verbosity == 0 or self.selection is None or self.selected_targets is None:
1419
+ return []
1420
+ if self.verbosity == 1:
1421
+ return [
1422
+ DictMessage(
1423
+ topic=SelectorMessageTopics.STATE,
1424
+ value={
1425
+ "population_size": self.population_size,
1426
+ "selected_individuals": self.selection,
1427
+ },
1428
+ source=self.__class__.__name__,
1429
+ )
1430
+ ]
1431
+ # verbosity == 2
1432
+ if isinstance(self.selected_individuals, pl.DataFrame):
1433
+ message = PolarsDataFrameMessage(
1434
+ topic=SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
1435
+ value=pl.concat([self.selected_individuals, self.selected_targets], how="horizontal"),
1436
+ source=self.__class__.__name__,
1437
+ )
1438
+ else:
1439
+ warnings.warn("Population is not a Polars DataFrame. Defaulting to providing OUTPUTS only.", stacklevel=2)
1440
+ message = PolarsDataFrameMessage(
1441
+ topic=SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
1442
+ value=self.selected_targets,
1443
+ source=self.__class__.__name__,
1444
+ )
1445
+ return [
1446
+ DictMessage(
1447
+ topic=SelectorMessageTopics.STATE,
1448
+ value={
1449
+ "population_size": self.population_size,
1450
+ "selected_individuals": self.selection,
1451
+ },
1452
+ source=self.__class__.__name__,
1453
+ ),
1454
+ message,
1455
+ NumpyArrayMessage(
1456
+ topic=SelectorMessageTopics.SELECTED_FITNESS,
1457
+ value=self.fitness,
1458
+ source=self.__class__.__name__,
1459
+ ),
1460
+ ]
1461
+
1462
+ def update(self, message: Message) -> None:
1463
+ pass
1464
+
1465
+
1466
+ @njit
1467
+ def _nsga2_crowding_distance_assignment(
1468
+ non_dominated_front: np.ndarray, f_mins: np.ndarray, f_maxs: np.ndarray
1469
+ ) -> np.ndarray:
1470
+ """Computes the crowding distance as specified in the definition of NSGA2.
1471
+
1472
+ This function computes the crowding distances for a non-dominated set of solutions.
1473
+ A smaller value means that a solution is more crowded (worse), while a larger value means
1474
+ it is less crowded (better).
1475
+
1476
+ Note:
1477
+ The boundary point in `non_dominated_front` will be assigned a non-crowding
1478
+ distance value of `np.inf`, indicating that they should always be included
1479
+ in later sorting.
1480
+
1481
+ Args:
1482
+ non_dominated_front (np.ndarray): a 2D numpy array (size n x m = number
1483
+ of vectors x number of targets (objective functions)) containing
1484
+ mutually non-dominated vectors. The values of the vectors correspond to
1485
+ the optimization 'target' (usually the minimized objective function
1486
+ values.)
1487
+ f_mins (np.ndarray): a 1D numpy array of size m containing the minimum objective function
1488
+ values in `non_dominated_front`.
1489
+ f_maxs (np.ndarray): a 1D numpy array of size m containing the maximum objective function
1490
+ values in `non_dominated_front`.
1491
+
1492
+ Returns:
1493
+ np.ndarray: a numpy array of size n containing the crowding distances for each vector
1494
+ in `non_dominated_front`.
1495
+
1496
+ Reference: Deb, K., Pratap, A., Agarwal, S., & Meyarivan, T. A. M. T.
1497
+ (2002). A fast and elitist multiobjective genetic algorithm: NSGA-II. IEEE
1498
+ transactions on evolutionary computation, 6(2), 182-197.
1499
+ """
1500
+ vectors = non_dominated_front # I
1501
+ num_vectors = vectors.shape[0] # l
1502
+ num_objectives = vectors.shape[1]
1503
+
1504
+ crowding_distances = np.zeros(num_vectors) # I[i]_distance
1505
+
1506
+ for m in range(num_objectives):
1507
+ # sort by column (objective)
1508
+ m_order = vectors[:, m].argsort()
1509
+ # include boundary points
1510
+ crowding_distances[m_order[0]], crowding_distances[m_order[-1]] = np.inf, np.inf
1511
+
1512
+ for i in range(1, num_vectors - 1):
1513
+ crowding_distances[m_order[i]] = crowding_distances[m_order[i]] + (
1514
+ vectors[m_order[i + 1], m] - vectors[m_order[i - 1], m]
1515
+ ) / (f_maxs[m] - f_mins[m])
1516
+
1517
+ return crowding_distances
1518
+
1519
+
1520
+ class NSGA2Selector(BaseSelector):
1521
+ """Implements the selection operator defined for NSGA2.
1522
+
1523
+ Implements the selection operator defined for NSGA2, which included the crowding
1524
+ distance calculation.
1525
+
1526
+ Reference: Deb, K., Pratap, A., Agarwal, S., & Meyarivan, T. A. M. T.
1527
+ (2002). A fast and elitist multiobjective genetic algorithm: NSGA-II. IEEE
1528
+ transactions on evolutionary computation, 6(2), 182-197.
1529
+ """
1530
+
1531
+ @property
1532
+ def provided_topics(self):
1533
+ """The topics provided for the NSGA2 method."""
1534
+ return {
1535
+ 0: [],
1536
+ 1: [SelectorMessageTopics.STATE],
1537
+ 2: [SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS, SelectorMessageTopics.SELECTED_FITNESS],
1538
+ }
1539
+
1540
+ @property
1541
+ def interested_topics(self):
1542
+ """The topics the NSGA2 method is interested in."""
1543
+ return []
1544
+
1545
+ def __init__(
1546
+ self,
1547
+ problem: Problem,
1548
+ verbosity: int,
1549
+ publisher: Publisher,
1550
+ population_size: int,
1551
+ seed: int = 0,
1552
+ ):
1553
+ super().__init__(problem=problem, verbosity=verbosity, publisher=publisher, seed=seed)
1554
+ if self.constraints_symbols is not None:
1555
+ print(
1556
+ "NSGA2 selector does not currently support constraints. "
1557
+ "Results may vary if used to solve constrainted problems."
1558
+ )
1559
+ self.population_size = population_size
1560
+ self.seed = seed
1561
+ self.selection: list[int] | None = None
1562
+ self.selected_individuals: SolutionType | None = None
1563
+ self.selected_targets: pl.DataFrame | None = None
1564
+
1565
+ def do(
1566
+ self, parents: tuple[SolutionType, pl.DataFrame], offsprings: tuple[SolutionType, pl.DataFrame]
1567
+ ) -> tuple[SolutionType, pl.DataFrame]:
1568
+ """Perform the selection operation."""
1569
+ # First iteration, offspring is empty
1570
+ # Do basic binary tournament selection, recombination, and mutation
1571
+ # In practice, just compute the non-dom ranks and provide them as fitness
1572
+
1573
+ # Off-spring empty (first iteration, compute only non-dominated ranks and provide them as fitness)
1574
+ if offsprings[0].is_empty() and offsprings[1].is_empty():
1575
+ # just compute non-dominated ranks of population and be done
1576
+ parents_a = parents[1][self.target_symbols].to_numpy()
1577
+ fronts = fast_non_dominated_sort(parents_a)
1578
+
1579
+ # assign fitness according to non-dom rank (lower better)
1580
+ scores = np.arange(len(fronts))
1581
+ fitness_values = scores @ fronts
1582
+ self.fitness = fitness_values
1583
+
1584
+ # all selected in first iteration
1585
+ self.selection = list(range(len(parents[1])))
1586
+ self.selected_individuals = parents[0]
1587
+ self.selected_targets = parents[1]
1588
+
1589
+ self.notify()
1590
+
1591
+ return self.selected_individuals, self.selected_targets
1592
+
1593
+ # #Actual selection operator for NSGA2
1594
+
1595
+ # Combine parent and offspring R_t = P_t U Q_t
1596
+ r_solutions = parents[0].vstack(offsprings[0])
1597
+ r_population = parents[1].vstack(offsprings[1])
1598
+ r_targets_arr = r_population[self.target_symbols].to_numpy()
1599
+
1600
+ # the minimum and maximum target values in the whole current population
1601
+ f_mins, f_maxs = np.min(r_targets_arr, axis=0), np.max(r_targets_arr, axis=0)
1602
+
1603
+ # Do fast non-dominated sorting on R_t -> F
1604
+ fronts = fast_non_dominated_sort(r_targets_arr)
1605
+ crowding_distances = np.ones(self.population_size) * np.nan
1606
+ rankings = np.ones(self.population_size) * np.nan
1607
+ fitness_values = np.ones(self.population_size) * np.nan
1608
+
1609
+ # Set the new parent population to P_t+1 = empty and i=1
1610
+ new_parents = np.ones((self.population_size, parents[1].shape[1])) * np.nan
1611
+ new_parents_solutions = np.ones((self.population_size, parents[0].shape[1])) * np.nan
1612
+ parents_ptr = 0 # keep track where stuff was last added
1613
+
1614
+ # the -1 is here because searchsorted returns the index where we can insert the population size to preserve the
1615
+ # order, hence, the previous index of this will be the last element in the cumsum that is less than
1616
+ # the population size
1617
+ last_whole_front_idx = (
1618
+ np.searchsorted(np.cumsum(np.sum(fronts, axis=1)), self.population_size, side="right") - 1
1619
+ )
1620
+
1621
+ last_ranking = 0 # in case first front is larger than the population size
1622
+ for i in range(last_whole_front_idx + 1): # inclusive
1623
+ # The looped front here will result in a new population with size <= 100.
1624
+
1625
+ # Compute the crowding distances for F_i
1626
+ distances = _nsga2_crowding_distance_assignment(r_targets_arr[fronts[i]], f_mins, f_maxs)
1627
+ crowding_distances[parents_ptr : parents_ptr + distances.shape[0]] = (
1628
+ distances # distances will have same number of elements as in front[i]
1629
+ )
1630
+
1631
+ # keep track of the rankings as well (best = 0, larger worse). First
1632
+ # non-dom front will have a rank fitness of 0.
1633
+ rankings[parents_ptr : parents_ptr + distances.shape[0]] = i
1634
+
1635
+ # P_t+1 = P_t+1 U F_i
1636
+ new_parents[parents_ptr : parents_ptr + distances.shape[0]] = r_population.filter(fronts[i])
1637
+ new_parents_solutions[parents_ptr : parents_ptr + distances.shape[0]] = r_solutions.filter(fronts[i])
1638
+
1639
+ # compute fitness
1640
+ # infs are checked since boundary points are assigned this value when computing the crowding distance
1641
+ finite_distances = distances[distances != np.inf]
1642
+ max_no_inf = np.nanmax(finite_distances) if finite_distances.size > 0 else np.ones(fronts[i].sum())
1643
+ distances_no_inf = np.nan_to_num(distances, posinf=max_no_inf * 1.1)
1644
+
1645
+ # Distances for the current front normalized between 0 and 1.
1646
+ # The small scalar we add in the nominator and denominator is to
1647
+ # ensure that no distance value would result in exactly 0 after
1648
+ # normalizing, which would increase the corresponding solution
1649
+ # ranking, once reversed, which we do not want to.
1650
+ normalized_distances = (distances_no_inf - (distances_no_inf.min() - 1e-6)) / (
1651
+ distances_no_inf.max() - (distances_no_inf.min() - 1e-6)
1652
+ )
1653
+
1654
+ # since higher is better for the crowded distance, we substract the normalized distances from 1 so that
1655
+ # lower is better, which allows us to combine them with the ranking
1656
+ # No value here should be 1.0 or greater.
1657
+ reversed_distances = 1.0 - normalized_distances
1658
+
1659
+ front_fitness = reversed_distances + rankings[parents_ptr : parents_ptr + distances.shape[0]]
1660
+ fitness_values[parents_ptr : parents_ptr + distances.shape[0]] = front_fitness
1661
+
1662
+ # increment parent pointer
1663
+ parents_ptr += distances.shape[0]
1664
+
1665
+ # keep track of last given rank
1666
+ last_ranking = i
1667
+
1668
+ # deal with last (partial) front, if needed
1669
+ trimmed_and_sorted_indices = None
1670
+ if parents_ptr < self.population_size:
1671
+ distances = _nsga2_crowding_distance_assignment(
1672
+ r_targets_arr[fronts[last_whole_front_idx + 1]], f_mins, f_maxs
1673
+ )
1674
+
1675
+ # Sort F_i in descending order according to crowding distance
1676
+ # This makes picking the selected part of the partial front easier
1677
+ trimmed_and_sorted_indices = distances.argsort()[::-1][: self.population_size - parents_ptr]
1678
+
1679
+ crowding_distances[parents_ptr : self.population_size] = distances[trimmed_and_sorted_indices]
1680
+ rankings[parents_ptr : self.population_size] = last_ranking + 1
1681
+
1682
+ # P_t+1 = P_t+1 U F_i[1: (N - |P_t+1|)]
1683
+ new_parents[parents_ptr : self.population_size] = r_population.filter(fronts[last_whole_front_idx + 1])[
1684
+ trimmed_and_sorted_indices
1685
+ ]
1686
+ new_parents_solutions[parents_ptr : self.population_size] = r_solutions.filter(
1687
+ fronts[last_whole_front_idx + 1]
1688
+ )[trimmed_and_sorted_indices]
1689
+
1690
+ # compute fitness (see above for details)
1691
+ finite_distances = distances[trimmed_and_sorted_indices][distances[trimmed_and_sorted_indices] != np.inf]
1692
+ max_no_inf = (
1693
+ np.nanmax(finite_distances)
1694
+ if finite_distances.size > 0
1695
+ else np.ones(len(trimmed_and_sorted_indices)) # we have only boundary points
1696
+ )
1697
+ distances_no_inf = np.nan_to_num(distances[trimmed_and_sorted_indices], posinf=max_no_inf * 1.1)
1698
+
1699
+ normalized_distances = (distances_no_inf - (distances_no_inf.min() - 1e-6)) / (
1700
+ distances_no_inf.max() - (distances_no_inf.min() - 1e-6)
1701
+ )
1702
+
1703
+ reversed_distances = 1.0 - normalized_distances
1704
+
1705
+ front_fitness = reversed_distances + rankings[parents_ptr : self.population_size]
1706
+ fitness_values[parents_ptr : parents_ptr + self.population_size] = front_fitness
1707
+
1708
+ # back to polars, return values
1709
+ solutions = pl.DataFrame(new_parents_solutions, schema=parents[0].schema)
1710
+ outputs = pl.DataFrame(new_parents, schema=parents[1].schema)
1711
+
1712
+ self.fitness = fitness_values
1713
+
1714
+ whole_fronts = fronts[: last_whole_front_idx + 1]
1715
+ whole_indices = [np.where(row)[0].tolist() for row in whole_fronts]
1716
+
1717
+ if trimmed_and_sorted_indices is not None:
1718
+ # partial front considered
1719
+ partial_front = fronts[last_whole_front_idx + 1]
1720
+ partial_indices = np.where(partial_front)[0][trimmed_and_sorted_indices].tolist()
1721
+ else:
1722
+ partial_indices = []
1723
+
1724
+ self.selection = [index for indices in whole_indices for index in indices] + partial_indices
1725
+ self.selected_individuals = solutions
1726
+ self.selected_targets = outputs
1727
+
1728
+ self.notify()
1729
+ return solutions, outputs
1730
+
1731
+ def state(self) -> Sequence[Message]:
1732
+ """Return the state of the selector."""
1733
+ if self.verbosity == 0 or self.selection is None or self.selected_targets is None:
1734
+ return []
1735
+ if self.verbosity == 1:
1736
+ return [
1737
+ DictMessage(
1738
+ topic=SelectorMessageTopics.STATE,
1739
+ value={
1740
+ "population_size": self.population_size,
1741
+ "selected_individuals": self.selection,
1742
+ },
1743
+ source=self.__class__.__name__,
1744
+ )
1745
+ ]
1746
+ # verbosity == 2
1747
+ if isinstance(self.selected_individuals, pl.DataFrame):
1748
+ message = PolarsDataFrameMessage(
1749
+ topic=SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
1750
+ value=pl.concat([self.selected_individuals, self.selected_targets], how="horizontal"),
1751
+ source=self.__class__.__name__,
1752
+ )
1753
+ else:
1754
+ warnings.warn("Population is not a Polars DataFrame. Defaulting to providing OUTPUTS only.", stacklevel=2)
1755
+ message = PolarsDataFrameMessage(
1756
+ topic=SelectorMessageTopics.SELECTED_VERBOSE_OUTPUTS,
1757
+ value=self.selected_targets,
1758
+ source=self.__class__.__name__,
1759
+ )
1760
+ return [
1761
+ DictMessage(
1762
+ topic=SelectorMessageTopics.STATE,
1763
+ value={
1764
+ "population_size": self.population_size,
1765
+ "selected_individuals": self.selection,
1766
+ },
1767
+ source=self.__class__.__name__,
1768
+ ),
1769
+ message,
1770
+ NumpyArrayMessage(
1771
+ topic=SelectorMessageTopics.SELECTED_FITNESS,
1772
+ value=self.fitness,
1773
+ source=self.__class__.__name__,
1774
+ ),
1775
+ ]
1776
+
1777
+ def update(self, message: Message) -> None:
1778
+ pass