desdeo-2.0.0-py3-none-any.whl → desdeo-2.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (130)
  1. desdeo/adm/ADMAfsar.py +551 -0
  2. desdeo/adm/ADMChen.py +414 -0
  3. desdeo/adm/BaseADM.py +119 -0
  4. desdeo/adm/__init__.py +11 -0
  5. desdeo/api/__init__.py +6 -6
  6. desdeo/api/app.py +38 -28
  7. desdeo/api/config.py +65 -44
  8. desdeo/api/config.toml +23 -12
  9. desdeo/api/db.py +10 -8
  10. desdeo/api/db_init.py +12 -6
  11. desdeo/api/models/__init__.py +220 -20
  12. desdeo/api/models/archive.py +16 -27
  13. desdeo/api/models/emo.py +128 -0
  14. desdeo/api/models/enautilus.py +69 -0
  15. desdeo/api/models/gdm/gdm_aggregate.py +139 -0
  16. desdeo/api/models/gdm/gdm_base.py +69 -0
  17. desdeo/api/models/gdm/gdm_score_bands.py +114 -0
  18. desdeo/api/models/gdm/gnimbus.py +138 -0
  19. desdeo/api/models/generic.py +104 -0
  20. desdeo/api/models/generic_states.py +401 -0
  21. desdeo/api/models/nimbus.py +158 -0
  22. desdeo/api/models/preference.py +44 -6
  23. desdeo/api/models/problem.py +274 -64
  24. desdeo/api/models/session.py +4 -1
  25. desdeo/api/models/state.py +419 -52
  26. desdeo/api/models/user.py +7 -6
  27. desdeo/api/models/utopia.py +25 -0
  28. desdeo/api/routers/_EMO.backup +309 -0
  29. desdeo/api/routers/_NIMBUS.py +6 -3
  30. desdeo/api/routers/emo.py +497 -0
  31. desdeo/api/routers/enautilus.py +237 -0
  32. desdeo/api/routers/gdm/gdm_aggregate.py +234 -0
  33. desdeo/api/routers/gdm/gdm_base.py +420 -0
  34. desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_manager.py +398 -0
  35. desdeo/api/routers/gdm/gdm_score_bands/gdm_score_bands_routers.py +377 -0
  36. desdeo/api/routers/gdm/gnimbus/gnimbus_manager.py +698 -0
  37. desdeo/api/routers/gdm/gnimbus/gnimbus_routers.py +591 -0
  38. desdeo/api/routers/generic.py +233 -0
  39. desdeo/api/routers/nimbus.py +705 -0
  40. desdeo/api/routers/problem.py +201 -4
  41. desdeo/api/routers/reference_point_method.py +20 -44
  42. desdeo/api/routers/session.py +50 -26
  43. desdeo/api/routers/user_authentication.py +180 -26
  44. desdeo/api/routers/utils.py +187 -0
  45. desdeo/api/routers/utopia.py +230 -0
  46. desdeo/api/schema.py +10 -4
  47. desdeo/api/tests/conftest.py +94 -2
  48. desdeo/api/tests/test_enautilus.py +330 -0
  49. desdeo/api/tests/test_models.py +550 -72
  50. desdeo/api/tests/test_routes.py +902 -43
  51. desdeo/api/utils/_database.py +263 -0
  52. desdeo/api/utils/database.py +28 -266
  53. desdeo/api/utils/emo_database.py +40 -0
  54. desdeo/core.py +7 -0
  55. desdeo/emo/__init__.py +154 -24
  56. desdeo/emo/hooks/archivers.py +18 -2
  57. desdeo/emo/methods/EAs.py +128 -5
  58. desdeo/emo/methods/bases.py +9 -56
  59. desdeo/emo/methods/templates.py +111 -0
  60. desdeo/emo/operators/crossover.py +544 -42
  61. desdeo/emo/operators/evaluator.py +10 -14
  62. desdeo/emo/operators/generator.py +127 -24
  63. desdeo/emo/operators/mutation.py +212 -41
  64. desdeo/emo/operators/scalar_selection.py +202 -0
  65. desdeo/emo/operators/selection.py +956 -214
  66. desdeo/emo/operators/termination.py +124 -16
  67. desdeo/emo/options/__init__.py +108 -0
  68. desdeo/emo/options/algorithms.py +435 -0
  69. desdeo/emo/options/crossover.py +164 -0
  70. desdeo/emo/options/generator.py +131 -0
  71. desdeo/emo/options/mutation.py +260 -0
  72. desdeo/emo/options/repair.py +61 -0
  73. desdeo/emo/options/scalar_selection.py +66 -0
  74. desdeo/emo/options/selection.py +127 -0
  75. desdeo/emo/options/templates.py +383 -0
  76. desdeo/emo/options/termination.py +143 -0
  77. desdeo/gdm/__init__.py +22 -0
  78. desdeo/gdm/gdmtools.py +45 -0
  79. desdeo/gdm/score_bands.py +114 -0
  80. desdeo/gdm/voting_rules.py +50 -0
  81. desdeo/mcdm/__init__.py +23 -1
  82. desdeo/mcdm/enautilus.py +338 -0
  83. desdeo/mcdm/gnimbus.py +484 -0
  84. desdeo/mcdm/nautilus_navigator.py +7 -6
  85. desdeo/mcdm/reference_point_method.py +70 -0
  86. desdeo/problem/__init__.py +16 -11
  87. desdeo/problem/evaluator.py +4 -5
  88. desdeo/problem/external/__init__.py +18 -0
  89. desdeo/problem/external/core.py +356 -0
  90. desdeo/problem/external/pymoo_provider.py +266 -0
  91. desdeo/problem/external/runtime.py +44 -0
  92. desdeo/problem/gurobipy_evaluator.py +37 -12
  93. desdeo/problem/infix_parser.py +1 -16
  94. desdeo/problem/json_parser.py +7 -11
  95. desdeo/problem/pyomo_evaluator.py +25 -6
  96. desdeo/problem/schema.py +73 -55
  97. desdeo/problem/simulator_evaluator.py +65 -15
  98. desdeo/problem/testproblems/__init__.py +26 -11
  99. desdeo/problem/testproblems/benchmarks_server.py +120 -0
  100. desdeo/problem/testproblems/cake_problem.py +185 -0
  101. desdeo/problem/testproblems/dmitry_forest_problem_discrete.py +71 -0
  102. desdeo/problem/testproblems/forest_problem.py +77 -69
  103. desdeo/problem/testproblems/multi_valued_constraints.py +119 -0
  104. desdeo/problem/testproblems/{river_pollution_problem.py → river_pollution_problems.py} +28 -22
  105. desdeo/problem/testproblems/single_objective.py +289 -0
  106. desdeo/problem/testproblems/zdt_problem.py +4 -1
  107. desdeo/problem/utils.py +1 -1
  108. desdeo/tools/__init__.py +39 -21
  109. desdeo/tools/desc_gen.py +22 -0
  110. desdeo/tools/generics.py +22 -2
  111. desdeo/tools/group_scalarization.py +3090 -0
  112. desdeo/tools/indicators_binary.py +107 -1
  113. desdeo/tools/indicators_unary.py +3 -16
  114. desdeo/tools/message.py +33 -2
  115. desdeo/tools/non_dominated_sorting.py +4 -3
  116. desdeo/tools/patterns.py +9 -7
  117. desdeo/tools/pyomo_solver_interfaces.py +49 -36
  118. desdeo/tools/reference_vectors.py +118 -351
  119. desdeo/tools/scalarization.py +340 -1413
  120. desdeo/tools/score_bands.py +491 -328
  121. desdeo/tools/utils.py +117 -49
  122. desdeo/tools/visualizations.py +67 -0
  123. desdeo/utopia_stuff/utopia_problem.py +1 -1
  124. desdeo/utopia_stuff/utopia_problem_old.py +1 -1
  125. {desdeo-2.0.0.dist-info → desdeo-2.1.1.dist-info}/METADATA +47 -30
  126. desdeo-2.1.1.dist-info/RECORD +180 -0
  127. {desdeo-2.0.0.dist-info → desdeo-2.1.1.dist-info}/WHEEL +1 -1
  128. desdeo-2.0.0.dist-info/RECORD +0 -120
  129. /desdeo/api/utils/{logger.py → _logger.py} +0 -0
  130. {desdeo-2.0.0.dist-info → desdeo-2.1.1.dist-info/licenses}/LICENSE +0 -0
desdeo/problem/testproblems/forest_problem.py
@@ -16,7 +16,13 @@ from desdeo.problem.schema import (
     VariableTypeEnum,
 )
 
-def forest_problem(simulation_results: str, treatment_key: str, holding: int = 1, comparing: bool = False) -> Problem:
+
+def forest_problem(
+    simulation_results: str = "./tests/data/alternatives_290124.csv",
+    treatment_key: str = "./tests/data/alternatives_key_290124.csv",
+    holding: int = 1,
+    comparing: bool = False,
+) -> Problem:
     r"""Defines a test forest problem that has TensorConstants and TensorVariables.
 
     The problem has TensorConstants V, W and P as vectors taking values from a data file and
@@ -45,8 +51,8 @@ def forest_problem(simulation_results: str, treatment_key: str, holding: int = 1
         simulation_results (str): Location of the simulation results file.
         treatment_key (str): Location of the file with the treatment information.
         holding (int, optional): The number of the holding to be optimized. Defaults to 1.
-        comparing (bool, optional): Determines if solutions are to be compared to those from the rahti app.
-            Defaults to False.
+        comparing (bool, optional): This is only used for testing the method.
+            If comparing == True, the results are nonsense. Defaults to False.
 
     Returns:
         Problem: An instance of the test forest problem.
@@ -54,58 +60,60 @@ def forest_problem(simulation_results: str, treatment_key: str, holding: int = 1
     df = pl.read_csv(simulation_results, schema_overrides={"unit": pl.Float64})
     df_key = pl.read_csv(treatment_key, schema_overrides={"unit": pl.Float64})
 
-    selected_df_v = df.filter(pl.col("holding") == holding).select(["unit", "schedule", "npv_5_percent"])
-    unique_units = selected_df_v.unique(["unit"], maintain_order=True).get_column("unit")
-    selected_df_v.group_by(["unit", "schedule"])
-    rows_by_key = selected_df_v.rows_by_key(key=["unit", "schedule"])
-    v_array = np.zeros((selected_df_v["unit"].n_unique(), selected_df_v["schedule"].n_unique()))
-    for i in range(np.shape(v_array)[0]):
-        for j in range(np.shape(v_array)[1]):
-            if (unique_units[i], j) in rows_by_key:
-                v_array[i][j] = rows_by_key[(unique_units[i], j)][0]
-
-    # determine whether the results are to be compared to those from the rahti app (for testing purposes)
-    # if compared, the stock values are calculated by substacting the value after 2025 period from
-    # the value after the 2035 period (in other words, last value - first value)
-    if comparing:
-        selected_df_w = df.filter(pl.col("holding") == holding).select(["unit", "schedule", "stock_2025", "stock_2035"])
-        selected_df_w.group_by(["unit", "schedule"])
-        rows_by_key = selected_df_w.rows_by_key(key=["unit", "schedule"])
-        selected_df_key_w = df_key.select(["unit", "schedule", "treatment"])
-        selected_df_key_w.group_by(["unit", "schedule"])
-        rows_by_key_df_key = selected_df_key_w.rows_by_key(key=["unit", "schedule"])
-        w_array = np.zeros((selected_df_w["unit"].n_unique(), selected_df_w["schedule"].n_unique()))
-        for i in range(np.shape(w_array)[0]):
-            for j in range(np.shape(w_array)[1]):
-                if len(rows_by_key_df_key[(unique_units[i], j)]) == 0:
-                    continue
-                if (unique_units[i], j) in rows_by_key:
-                    w_array[i][j] = rows_by_key[(unique_units[i], j)][0][1] - rows_by_key[(unique_units[i], j)][0][0]
-    else:
-        selected_df_w = df.filter(pl.col("holding") == holding).select(["unit", "schedule", "stock_2035"])
-        selected_df_w.group_by(["unit", "schedule"])
-        rows_by_key = selected_df_w.rows_by_key(key=["unit", "schedule"])
-        selected_df_key_w = df_key.select(["unit", "schedule", "treatment"])
-        selected_df_key_w.group_by(["unit", "schedule"])
-        rows_by_key_df_key = selected_df_key_w.rows_by_key(key=["unit", "schedule"])
-        w_array = np.zeros((selected_df_w["unit"].n_unique(), selected_df_w["schedule"].n_unique()))
-        for i in range(np.shape(w_array)[0]):
-            for j in range(np.shape(w_array)[1]):
-                if len(rows_by_key_df_key[(unique_units[i], j)]) == 0:
-                    continue
-                if (unique_units[i], j) in rows_by_key:
-                    w_array[i][j] = rows_by_key[(unique_units[i], j)][0][0]
-
-    selected_df_p = df.filter(pl.col("holding") == holding).select(
-        ["unit", "schedule", "harvest_value_period_2025", "harvest_value_period_2030", "harvest_value_period_2035"]
+    df_joined = df.join(df_key, on=["holding", "unit", "schedule"], how="left")
+
+    selected_df = df_joined.filter(pl.col("holding") == holding).select(
+        [
+            "unit",
+            "schedule",
+            "npv_5_percent",
+            "stock_2025",
+            "stock_2035",
+            "harvest_value_period_2025",
+            "harvest_value_period_2030",
+            "harvest_value_period_2035",
+            "treatment",
+        ]
     )
-    selected_df_p.group_by(["unit", "schedule"])
-    rows_by_key = selected_df_p.rows_by_key(key=["unit", "schedule"])
-    p_array = np.zeros((selected_df_p["unit"].n_unique(), selected_df_p["schedule"].n_unique()))
-    for i in range(np.shape(p_array)[0]):
-        for j in range(np.shape(p_array)[1]):
-            if (unique_units[i], j) in rows_by_key:
-                p_array[i][j] = sum(rows_by_key[(unique_units[i], j)][0])
+    unique_units = selected_df.unique(["unit"], maintain_order=True).get_column("unit")
+    n_units = len(unique_units)
+    unique_schedules = selected_df.unique(["schedule"], maintain_order=True).get_column("schedule")
+    n_schedules = len(unique_schedules)
+
+    v_array = np.zeros((n_units, n_schedules))
+    w_array = np.zeros((n_units, n_schedules))
+    p_array = np.zeros((n_units, n_schedules))
+
+    # This is not the fastest way to do this, but the code is probably more understandable
+    for i in range(n_units):
+        for j in range(n_schedules):
+            unit = unique_units[i]
+            schedule = unique_schedules[j]
+            print(f"unit {unit} schedule {schedule}")
+            if selected_df.filter((pl.col("unit") == unit) & (pl.col("schedule") == schedule)).height == 0:
+                continue
+            v_array[i][j] = (
+                selected_df.filter((pl.col("unit") == unit) & (pl.col("schedule") == schedule))
+                .select("npv_5_percent")
+                .item()
+            )
+            w_array[i][j] = (
+                selected_df.filter((pl.col("unit") == unit) & (pl.col("schedule") == schedule))
+                .select("stock_2035")
+                .item()
+            )
+            if comparing:
+                w_array[i][j] -= (
+                    selected_df.filter((pl.col("unit") == unit) & (pl.col("schedule") == schedule))
+                    .select("stock_2025")
+                    .item()
+                )
+            # The harvest values are not going to be discounted like this
+            p_array[i][j] = sum(
+                selected_df.filter((pl.col("unit") == unit) & (pl.col("schedule") == schedule))
+                .select(["harvest_value_period_2025", "harvest_value_period_2030", "harvest_value_period_2035"])
+                .row(0)
+            )
 
     constants = []
     variables = []
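The rewritten loop above fills v_array, w_array, and p_array by filtering the joined frame for each (unit, schedule) pair and pulling scalars out with .item(). A minimal, self-contained sketch of that polars pattern, using toy data instead of the package's CSV files:

import polars as pl

# Toy stand-in for selected_df; the column names mirror the diff above.
selected_df = pl.DataFrame(
    {
        "unit": [1.0, 1.0, 2.0],
        "schedule": [0, 1, 0],
        "npv_5_percent": [10.0, 12.5, 7.0],
    }
)

unit, schedule = 1.0, 1
match = selected_df.filter((pl.col("unit") == unit) & (pl.col("schedule") == schedule))
if match.height > 0:  # .height is the row count; 0 means no row exists for this pair
    value = match.select("npv_5_percent").item()  # .item() requires exactly one row and one column
    print(value)  # 12.5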
@@ -114,25 +122,25 @@ def forest_problem(simulation_results: str, treatment_key: str, holding: int = 1
     f_2_func = []
     f_3_func = []
     # define the constants V, W and P, decision variable X, constraints, and objective function expressions in one loop
-    for i in range(np.shape(v_array)[0]):
+    for i in range(n_units):
         # Constants V, W and P
         v = TensorConstant(
-            name=f"V_{i+1}",
-            symbol=f"V_{i+1}",
+            name=f"V_{i + 1}",
+            symbol=f"V_{i + 1}",
             shape=[np.shape(v_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
             values=v_array[i].tolist(),
         )
         constants.append(v)
         w = TensorConstant(
-            name=f"W_{i+1}",
-            symbol=f"W_{i+1}",
+            name=f"W_{i + 1}",
+            symbol=f"W_{i + 1}",
             shape=[np.shape(w_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
             values=w_array[i].tolist(),
         )
         constants.append(w)
         p = TensorConstant(
-            name=f"P_{i+1}",
-            symbol=f"P_{i+1}",
+            name=f"P_{i + 1}",
+            symbol=f"P_{i + 1}",
             shape=[np.shape(p_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
             values=p_array[i].tolist(),
         )
@@ -140,8 +148,8 @@ def forest_problem(simulation_results: str, treatment_key: str, holding: int = 1
         # Decision variable X
         constants.append(p)
         x = TensorVariable(
-            name=f"X_{i+1}",
-            symbol=f"X_{i+1}",
+            name=f"X_{i + 1}",
+            symbol=f"X_{i + 1}",
             variable_type=VariableTypeEnum.binary,
             shape=[np.shape(v_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
             lowerbounds=np.shape(v_array)[1] * [0],
@@ -152,10 +160,10 @@ def forest_problem(simulation_results: str, treatment_key: str, holding: int = 1
 
         # Constraints
         con = Constraint(
-            name=f"x_con_{i+1}",
-            symbol=f"x_con_{i+1}",
+            name=f"x_con_{i + 1}",
+            symbol=f"x_con_{i + 1}",
             cons_type=ConstraintTypeEnum.EQ,
-            func=f"Sum(X_{i+1}) - 1",
+            func=f"Sum(X_{i + 1}) - 1",
             is_linear=True,
             is_convex=False,  # not checked
             is_twice_differentiable=True,
@@ -163,13 +171,13 @@ def forest_problem(simulation_results: str, treatment_key: str, holding: int = 1
         constraints.append(con)
 
         # Objective function expressions
-        exprs = f"V_{i+1}@X_{i+1}"
+        exprs = f"V_{i + 1}@X_{i + 1}"
         f_1_func.append(exprs)
 
-        exprs = f"W_{i+1}@X_{i+1}"
+        exprs = f"W_{i + 1}@X_{i + 1}"
         f_2_func.append(exprs)
 
-        exprs = f"P_{i+1}@X_{i+1}"
+        exprs = f"P_{i + 1}@X_{i + 1}"
         f_3_func.append(exprs)
 
     # form the objective function sums
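With the new default arguments, the test problem can be constructed without passing file paths, provided the default CSVs exist relative to the working directory. A hedged usage sketch (the import follows the module path shown in the file list; only holding is varied here):

from desdeo.problem.testproblems.forest_problem import forest_problem

# Falls back to ./tests/data/alternatives_290124.csv and ./tests/data/alternatives_key_290124.csv.
problem = forest_problem(holding=1)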
desdeo/problem/testproblems/multi_valued_constraints.py (new file)
@@ -0,0 +1,119 @@
+"""Defines a test problem with a constraint that is multi-valued."""
+
+from desdeo.problem import (
+    Constant,
+    Constraint,
+    ConstraintTypeEnum,
+    Objective,
+    ObjectiveTypeEnum,
+    Problem,
+    TensorConstant,
+    TensorVariable,
+    Variable,
+    VariableTypeEnum,
+)
+
+
+def multi_valued_constraint_problem() -> Problem:
+    r"""Defines a test problem with a multi-valued constraint.
+
+    The problem has two objectives, two variables, and two constraints, the other of which, is multi-valued.
+    The problem is defined as follows:
+    \[
+    \begin{aligned}
+    \text{Min} \quad
+    & f_1(x_1, x_2, y) = x_1^2 + x_2^2 + y^2, \\[4pt]
+    \text{Min} \quad
+    & f_2(x_1, x_2, y) = (x_1 - 2)^2 + (x_2 - 1)^2 + (y - 1)^2, \\[6pt]
+    \text{subject to} \quad
+    & g(x_1, x_2, y) = x_1^2 + x_2 + y - 2 \le 0, \\[4pt]
+    & G(x_1, x_2) = A
+    \begin{bmatrix}
+        x_1 \\[2pt]
+        x_2
+    \end{bmatrix}
+    \le 0,
+    \quad
+    A =
+    \begin{bmatrix}
+        1 & -1 \\[2pt]
+        -1 & -2
+    \end{bmatrix}.
+    \end{aligned}
+    \]
+
+
+    Returns:
+        Problem: the problem model.
+    """
+    xs = TensorVariable(
+        name="x",
+        symbol="X",
+        variable_type=VariableTypeEnum.real,
+        shape=[2, 1],
+        lowerbounds=-5.0,
+        upperbounds=5.0,
+        initial_values=0.1,
+    )
+
+    y = Variable(
+        name="y",
+        symbol="y",
+        variable_type=VariableTypeEnum.real,
+        lowerbound=-10.0,
+        upperbound=10.0,
+        initial_value=0.1,
+    )
+
+    a = TensorConstant(name="A", symbol="A", shape=[2, 2], values=[[1.0, -1.0], [-1.0, -2.0]])
+
+    one = Constant(name="one", symbol="one", value=1.0)
+
+    f_1_expr = "X[1, 1]**2 + X[2, 1]**2 + y**2"
+    f_2_expr = "(X[1, 1] - 2)**2 + (X[2, 1] - one)**2 + (y - one)**2"
+
+    g_1_expr = "X[1, 1]**2 + X[2, 1] + y - 2"
+    big_g_expr = "A @ X"
+
+    f_1 = Objective(
+        name="f1",
+        symbol="f_1",
+        func=f_1_expr,
+        objective_type=ObjectiveTypeEnum.analytical,
+        ideal=0.0,
+        nadir=150.0,
+        is_twice_differentiable=True,
+    )
+
+    f_2 = Objective(
+        name="f2",
+        symbol="f_2",
+        func=f_2_expr,
+        ideal=0.0,
+        nadir=206.0,
+        objective_type=ObjectiveTypeEnum.analytical,
+        is_twice_differentiable=True,
+    )
+
+    g_1 = Constraint(
+        name="g1", symbol="g_1", cons_type=ConstraintTypeEnum.LTE, func=g_1_expr, is_twice_differentiable=True
+    )
+
+    big_g = Constraint(
+        name="big_g",
+        symbol="G",
+        cons_type=ConstraintTypeEnum.LTE,
+        func=big_g_expr,
+        is_twice_differentiable=True,
+        is_linear=True,
+        is_convex=True,
+    )
+
+    return Problem(
+        name="Multi-valued-constraint problem",
+        description="Problem for testing problems with multi-valued constraints.",
+        constants=[a, one],
+        variables=[xs, y],
+        constraints=[g_1, big_g],
+        objectives=[f_1, f_2],
+    )
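For reference, the new factory can be exercised directly. A minimal sketch (the import path follows the file location in this diff; Problem is a pydantic model, so its fields are plain attributes):

from desdeo.problem.testproblems.multi_valued_constraints import multi_valued_constraint_problem

problem = multi_valued_constraint_problem()
print([v.symbol for v in problem.variables])    # ["X", "y"]
print([c.symbol for c in problem.constraints])  # ["g_1", "G"]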
desdeo/problem/testproblems/{river_pollution_problem.py → river_pollution_problems.py}
@@ -1,3 +1,5 @@
+"""Variants of the river pollution problem are defined here."""
+
 from pathlib import Path
 
 import polars as pl
@@ -12,6 +14,7 @@ from desdeo.problem.schema import (
     VariableTypeEnum,
 )
 
+
 def river_pollution_problem(*, five_objective_variant: bool = True) -> Problem:
     r"""Create a pydantic dataclass representation of the river pollution problem with either five or four variables.
 
@@ -164,34 +167,34 @@ def river_pollution_problem_discrete(five_objective_variant: bool = True) ->
     Heidelberg, 1997.
     """
     filename = "datasets/river_poll_4_objs.csv"
-    trueVarNames = {"x_1": "BOD", "x_2": "DO"}
-    trueObjNames = {"f1": "DO city", "f2": "DO municipality", "f3": "ROI fishery", "f4": "ROI city"}
+    true_var_names = {"x_1": "BOD", "x_2": "DO"}
+    true_obj_names = {"f1": "DO city", "f2": "DO municipality", "f3": "ROI fishery", "f4": "ROI city"}
     if five_objective_variant:
         filename = "datasets/river_poll_5_objs.csv"
-        trueObjNames["f5"] = "BOD deviation"
+        true_obj_names["f5"] = "BOD deviation"
 
     path = Path(__file__).parent.parent.parent.parent / filename
     data = pl.read_csv(path, has_header=True)
 
     variables = [
         Variable(
-            name=trueVarNames[varName],
+            name=true_var_names[varName],
             symbol=varName,
             variable_type=VariableTypeEnum.real,
             lowerbound=0.3,
             upperbound=1.0,
             initial_value=0.65,
         )
-        for varName in trueVarNames
+        for varName in true_var_names
     ]
     maximize = {"f1": True, "f2": True, "f3": True, "f4": True, "f5": False}
-    ideal = {objName: (data[objName].max() if maximize[objName] else data[objName].min()) for objName in trueObjNames}
-    nadir = {objName: (data[objName].min() if maximize[objName] else data[objName].max()) for objName in trueObjNames}
+    ideal = {objName: (data[objName].max() if maximize[objName] else data[objName].min()) for objName in true_obj_names}
+    nadir = {objName: (data[objName].min() if maximize[objName] else data[objName].max()) for objName in true_obj_names}
     units = {"f1": "mg/L", "f2": "mg/L", "f3": "%", "f4": "%", "f5": "mg/L"}
 
     objectives = [
         Objective(
-            name=trueObjNames[objName],
+            name=true_obj_names[objName],
             symbol=objName,
             func=None,
             unit=units[objName],
@@ -200,12 +203,12 @@ def river_pollution_problem_discrete(five_objective_variant: bool = True) ->
             ideal=ideal[objName],
             nadir=nadir[objName],
         )
-        for objName in trueObjNames
+        for objName in true_obj_names
     ]
 
     discrete_def = DiscreteRepresentation(
-        variable_values=data[list(trueVarNames.keys())].to_dict(),
-        objective_values=data[list(trueObjNames.keys())].to_dict(),
+        variable_values=data[list(true_var_names.keys())].to_dict(),
+        objective_values=data[list(true_obj_names.keys())].to_dict(),
     )
 
     return Problem(
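The discrete variant above derives its ideal and nadir points directly from the dataset using the maximize flags. A small sketch of that pattern with made-up numbers:

import polars as pl

# Hypothetical objective samples; in the package these come from the bundled CSV datasets.
data = pl.DataFrame({"f1": [4.0, 5.1, 6.2], "f2": [0.2, 0.5, 0.9]})
maximize = {"f1": True, "f2": False}

# The best attainable value per objective is the ideal, the worst is the nadir.
ideal = {name: (data[name].max() if maximize[name] else data[name].min()) for name in maximize}
nadir = {name: (data[name].min() if maximize[name] else data[name].max()) for name in maximize}
print(ideal)  # {'f1': 6.2, 'f2': 0.2}
print(nadir)  # {'f1': 4.0, 'f2': 0.9}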
@@ -254,8 +257,10 @@ def river_pollution_scenario() -> Problem:
     $$
     \\begin{equation}
     \\begin{array}{rll}
-    \\text{maximize} & f_1(\\mathbf{x}) = & \\alpha + \\left(\\log\\left(\\left(\\frac{\\beta}{2} - 1.14\\right)^2\\right) + \\beta^3\\right) x_1 \\\\
-    \\text{maximize} & f_2(\\mathbf{x}) = & \\gamma + \\delta x_1 + \\xi x_2 + \\frac{0.01}{\\eta - x_1^2} + \\frac{0.30}{\\eta - x_2^2} \\\\
+    \\text{maximize} & f_1(\\mathbf{x}) = & \\alpha + \\left(\\log\\left(\\left(\\frac{\\beta}{2}
+        - 1.14\\right)^2\\right) + \\beta^3\\right) x_1 \\\\
+    \\text{maximize} & f_2(\\mathbf{x}) = & \\gamma + \\delta x_1 + \\xi x_2 + \\frac{0.01}{\\eta - x_1^2}
+        + \\frac{0.30}{\\eta - x_2^2} \\\\
     \\text{maximize} & f_3(\\mathbf{x}) = & r - \\frac{0.71}{1.09 - x_1^2} \\\\
     \\text{minimize} & f_4(\\mathbf{x}) = & -0.96 + \\frac{0.96}{1.09 - x_2^2} \\\\
     \\text{subject to} & & 0.3 \\leq x_1, x_2 \\leq 1.0.
@@ -278,7 +283,7 @@ def river_pollution_scenario() -> Problem:
     Analysis: Proceedings of the XIth International Conference on MCDM, 1-6
     August 1994, Coimbra, Portugal. Berlin, Heidelberg: Springer Berlin
     Heidelberg, 1997.
-    """
+    """  # noqa: RUF002
     num_scenarios = 6
     scenario_key_stub = "scenario"
 
@@ -351,22 +356,23 @@ def river_pollution_scenario() -> Problem:
     scenario_keys = []
 
     for i in range(num_scenarios):
-        scenario_key = f"{scenario_key_stub}_{i+1}"
+        scenario_key = f"{scenario_key_stub}_{i + 1}"
         scenario_keys.append(scenario_key)
 
-        gamma_expr = f"Ln(alpha[{i+1}]/2 - 1) + alpha[{i+1}]/2 + 1.5"
+        gamma_expr = f"Ln(alpha[{i + 1}]/2 - 1) + alpha[{i + 1}]/2 + 1.5"
 
-        f1_expr = f"alpha[{i+1}] + (Ln((beta[{i+1}]/2 - 1.14)**2) + beta[{i+1}]**3)*x_1"
+        f1_expr = f"alpha[{i + 1}] + (Ln((beta[{i + 1}]/2 - 1.14)**2) + beta[{i + 1}]**3)*x_1"
         f2_expr = (
-            f"{gamma_expr} + delta[{i+1}]*x_1 + xi[{i+1}]*x_2 + 0.01/(eta[{i+1}] - x_1**2) + 0.3/(eta[{i+1}] - x_2**2)"
+            f"{gamma_expr} + delta[{i + 1}]*x_1 + xi[{i + 1}]*x_2 + 0.01/(eta[{i + 1}] - x_1**2) "
+            f"+ 0.3/(eta[{i + 1}] - x_2**2)"
         )
-        f3_expr = f"r[{i+1}] - 0.71/(1.09 - x_1**2)"
+        f3_expr = f"r[{i + 1}] - 0.71/(1.09 - x_1**2)"
 
         # f1
         objectives.append(
             Objective(
                 name="DO level city",
-                symbol=f"f1_{i+1}",
+                symbol=f"f1_{i + 1}",
                 scenario_keys=[scenario_key],
                 func=f1_expr,
                 objective_type=ObjectiveTypeEnum.analytical,
@@ -381,7 +387,7 @@ def river_pollution_scenario() -> Problem:
         objectives.append(
             Objective(
                 name="DO level fishery",
-                symbol=f"f2_{i+1}",
+                symbol=f"f2_{i + 1}",
                 scenario_keys=[scenario_key],
                 func=f2_expr,
                 objective_type=ObjectiveTypeEnum.analytical,
@@ -396,7 +402,7 @@ def river_pollution_scenario() -> Problem:
         objectives.append(
             Objective(
                 name="Return of investment",
-                symbol=f"f3_{i+1}",
+                symbol=f"f3_{i + 1}",
                 scenario_keys=[scenario_key],
                 func=f3_expr,
                 objective_type=ObjectiveTypeEnum.analytical,