desdeo 1.2-py3-none-any.whl → 2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- desdeo/__init__.py +8 -8
- desdeo/api/README.md +73 -0
- desdeo/api/__init__.py +15 -0
- desdeo/api/app.py +40 -0
- desdeo/api/config.py +69 -0
- desdeo/api/config.toml +53 -0
- desdeo/api/db.py +25 -0
- desdeo/api/db_init.py +79 -0
- desdeo/api/db_models.py +164 -0
- desdeo/api/malaga_db_init.py +27 -0
- desdeo/api/models/__init__.py +66 -0
- desdeo/api/models/archive.py +34 -0
- desdeo/api/models/preference.py +90 -0
- desdeo/api/models/problem.py +507 -0
- desdeo/api/models/reference_point_method.py +18 -0
- desdeo/api/models/session.py +46 -0
- desdeo/api/models/state.py +96 -0
- desdeo/api/models/user.py +51 -0
- desdeo/api/routers/_NAUTILUS.py +245 -0
- desdeo/api/routers/_NAUTILUS_navigator.py +233 -0
- desdeo/api/routers/_NIMBUS.py +762 -0
- desdeo/api/routers/__init__.py +5 -0
- desdeo/api/routers/problem.py +110 -0
- desdeo/api/routers/reference_point_method.py +117 -0
- desdeo/api/routers/session.py +76 -0
- desdeo/api/routers/test.py +16 -0
- desdeo/api/routers/user_authentication.py +366 -0
- desdeo/api/schema.py +94 -0
- desdeo/api/tests/__init__.py +0 -0
- desdeo/api/tests/conftest.py +59 -0
- desdeo/api/tests/test_models.py +701 -0
- desdeo/api/tests/test_routes.py +216 -0
- desdeo/api/utils/database.py +274 -0
- desdeo/api/utils/logger.py +29 -0
- desdeo/core.py +27 -0
- desdeo/emo/__init__.py +29 -0
- desdeo/emo/hooks/archivers.py +172 -0
- desdeo/emo/methods/EAs.py +418 -0
- desdeo/emo/methods/__init__.py +0 -0
- desdeo/emo/methods/bases.py +59 -0
- desdeo/emo/operators/__init__.py +1 -0
- desdeo/emo/operators/crossover.py +780 -0
- desdeo/emo/operators/evaluator.py +118 -0
- desdeo/emo/operators/generator.py +356 -0
- desdeo/emo/operators/mutation.py +1053 -0
- desdeo/emo/operators/selection.py +1036 -0
- desdeo/emo/operators/termination.py +178 -0
- desdeo/explanations/__init__.py +6 -0
- desdeo/explanations/explainer.py +100 -0
- desdeo/explanations/utils.py +90 -0
- desdeo/mcdm/__init__.py +19 -0
- desdeo/mcdm/nautili.py +345 -0
- desdeo/mcdm/nautilus.py +477 -0
- desdeo/mcdm/nautilus_navigator.py +655 -0
- desdeo/mcdm/nimbus.py +417 -0
- desdeo/mcdm/pareto_navigator.py +269 -0
- desdeo/mcdm/reference_point_method.py +116 -0
- desdeo/problem/__init__.py +79 -0
- desdeo/problem/evaluator.py +561 -0
- desdeo/problem/gurobipy_evaluator.py +562 -0
- desdeo/problem/infix_parser.py +341 -0
- desdeo/problem/json_parser.py +944 -0
- desdeo/problem/pyomo_evaluator.py +468 -0
- desdeo/problem/schema.py +1808 -0
- desdeo/problem/simulator_evaluator.py +298 -0
- desdeo/problem/sympy_evaluator.py +244 -0
- desdeo/problem/testproblems/__init__.py +73 -0
- desdeo/problem/testproblems/binh_and_korn_problem.py +88 -0
- desdeo/problem/testproblems/dtlz2_problem.py +102 -0
- desdeo/problem/testproblems/forest_problem.py +275 -0
- desdeo/problem/testproblems/knapsack_problem.py +163 -0
- desdeo/problem/testproblems/mcwb_problem.py +831 -0
- desdeo/problem/testproblems/mixed_variable_dimenrions_problem.py +83 -0
- desdeo/problem/testproblems/momip_problem.py +172 -0
- desdeo/problem/testproblems/nimbus_problem.py +143 -0
- desdeo/problem/testproblems/pareto_navigator_problem.py +89 -0
- desdeo/problem/testproblems/re_problem.py +492 -0
- desdeo/problem/testproblems/river_pollution_problem.py +434 -0
- desdeo/problem/testproblems/rocket_injector_design_problem.py +140 -0
- desdeo/problem/testproblems/simple_problem.py +351 -0
- desdeo/problem/testproblems/simulator_problem.py +92 -0
- desdeo/problem/testproblems/spanish_sustainability_problem.py +945 -0
- desdeo/problem/testproblems/zdt_problem.py +271 -0
- desdeo/problem/utils.py +245 -0
- desdeo/tools/GenerateReferencePoints.py +181 -0
- desdeo/tools/__init__.py +102 -0
- desdeo/tools/generics.py +145 -0
- desdeo/tools/gurobipy_solver_interfaces.py +258 -0
- desdeo/tools/indicators_binary.py +11 -0
- desdeo/tools/indicators_unary.py +375 -0
- desdeo/tools/interaction_schema.py +38 -0
- desdeo/tools/intersection.py +54 -0
- desdeo/tools/iterative_pareto_representer.py +99 -0
- desdeo/tools/message.py +234 -0
- desdeo/tools/ng_solver_interfaces.py +199 -0
- desdeo/tools/non_dominated_sorting.py +133 -0
- desdeo/tools/patterns.py +281 -0
- desdeo/tools/proximal_solver.py +99 -0
- desdeo/tools/pyomo_solver_interfaces.py +464 -0
- desdeo/tools/reference_vectors.py +462 -0
- desdeo/tools/scalarization.py +3138 -0
- desdeo/tools/scipy_solver_interfaces.py +454 -0
- desdeo/tools/score_bands.py +464 -0
- desdeo/tools/utils.py +320 -0
- desdeo/utopia_stuff/__init__.py +0 -0
- desdeo/utopia_stuff/data/1.json +15 -0
- desdeo/utopia_stuff/data/2.json +13 -0
- desdeo/utopia_stuff/data/3.json +15 -0
- desdeo/utopia_stuff/data/4.json +17 -0
- desdeo/utopia_stuff/data/5.json +15 -0
- desdeo/utopia_stuff/from_json.py +40 -0
- desdeo/utopia_stuff/reinit_user.py +38 -0
- desdeo/utopia_stuff/utopia_db_init.py +212 -0
- desdeo/utopia_stuff/utopia_problem.py +403 -0
- desdeo/utopia_stuff/utopia_problem_old.py +415 -0
- desdeo/utopia_stuff/utopia_reference_solutions.py +79 -0
- desdeo-2.0.0.dist-info/LICENSE +21 -0
- desdeo-2.0.0.dist-info/METADATA +168 -0
- desdeo-2.0.0.dist-info/RECORD +120 -0
- {desdeo-1.2.dist-info → desdeo-2.0.0.dist-info}/WHEEL +1 -1
- desdeo-1.2.dist-info/METADATA +0 -16
- desdeo-1.2.dist-info/RECORD +0 -4
desdeo/utopia_stuff/utopia_problem.py

@@ -0,0 +1,403 @@
+import math
+import numpy as np
+from pathlib import Path
+import polars as pl
+
+from desdeo.problem.schema import (
+    Constant,
+    Constraint,
+    ConstraintTypeEnum,
+    DiscreteRepresentation,
+    ExtraFunction,
+    Objective,
+    ObjectiveTypeEnum,
+    Problem,
+    TensorConstant,
+    TensorVariable,
+    Variable,
+    VariableTypeEnum,
+)
+from desdeo.tools.utils import available_solvers, payoff_table_method
+
+
+def utopia_problem(
+    simulation_results: str,  # when always run from the same place, these can be replaced with just the dir name
+    treatment_key: str,
+    problem_name: str = "Forest problem",
+    holding: int = 1,
+) -> tuple[Problem, dict]:
+    r"""Defines a test forest problem that has TensorConstants and TensorVariables.
+
+    The problem has TensorConstants V, W, and P1, P2, P3 as vectors taking values from a data file and
+    TensorVariables X_n, where n is the number of units in the data, as vectors matching the constants in shape.
+    The variables are binary, and each variable vector X_i has one variable with the value 1 while the others
+    have the value 0. The variable with the value 1 in each vector X_i represents the chosen plan for the
+    corresponding unit i. The three objective functions f_1, f_2, f_3 represent the net present value,
+    the wood volume at the end of the planning period, and the profit from harvesting.
+    All of the objective functions are to be maximized.
+    The problem is defined as follows:
+
+    \begin{align}
+        \mbox{maximize~} & \sum_{j=1}^N\sum_{i \in I_j} v_{ij} x_{ij} & \\
+        \mbox{maximize~} & \sum_{j=1}^N\sum_{i \in I_j} w_{ij} x_{ij} & \\
+        \mbox{maximize~} & \sum_{j=1}^N\sum_{i \in I_j} p_{ij} x_{ij} & \\
+        \nonumber\\
+        \mbox{subject to~} & \sum\limits_{i \in I_j} x_{ij} = 1, & \forall j = 1 \ldots N \\
+        & x_{ij}\in \{0,1\} & \forall j = 1 \ldots N, ~\forall i\in I_j,
+    \end{align}
+
+    where $x_{ij}$ are decision variables representing the choice of implementing management plan $i$ in stand $j$,
+    and $I_j$ is the set of available management plans for stand $j$. For each plan $i$ in stand $j$,
+    the net present value, the wood volume at the end of the planning period, and the profit from harvesting
+    are represented by $v_{ij}$, $w_{ij}$, and $p_{ij}$, respectively.
+
+    Args:
+        simulation_results (str): Path to the CSV file with the simulated alternatives for each unit and schedule.
+        treatment_key (str): Path to the CSV file mapping each unit and schedule to a treatment.
+        problem_name (str, optional): Name of the resulting problem. Defaults to "Forest problem".
+        holding (int, optional): The number of the holding to be optimized. Defaults to 1.
+
+    Returns:
+        tuple[Problem, dict]: An instance of the test forest problem and a dictionary mapping each variable
+            vector X_i to its unit and to the treatment behind each schedule index.
+    """
+    schedule_dict = {}
+
+    discounting_factor = 3  # This can be 1, 2, 3, 4 or 5. It represents %
+    discounting = [
+        (1 - 0.01 * discounting_factor) ** 2,
+        (1 - 0.01 * discounting_factor) ** 7,
+        (1 - 0.01 * discounting_factor) ** 17,
+    ]
+
+    # df = pl.read_csv(Path("tests/data/alternatives_290124.csv"), dtypes={"unit": pl.Float64})
+    df = pl.read_csv(Path(simulation_results), dtypes={"unit": pl.Float64})
+    # df_key = pl.read_csv(Path("tests/data/alternatives_key_290124.csv"), dtypes={"unit": pl.Float64})
+    df_key = pl.read_csv(Path(treatment_key), dtypes={"unit": pl.Float64})
+
+    # Calculate the total wood volume at the start
+    selected_df_v0 = df.filter(pl.col("holding") == holding).select(["unit", "stock_0"]).unique()
+    wood_volume_0 = int(selected_df_v0["stock_0"].sum())
+
+    selected_df_v = df.filter(pl.col("holding") == holding).select(
+        ["unit", "schedule", f"npv_{discounting_factor}_percent"]
+    )
+    unique_units = selected_df_v.unique(["unit"], maintain_order=True).get_column("unit")
+    selected_df_v.group_by(["unit", "schedule"])
+    rows_by_key = selected_df_v.rows_by_key(key=["unit", "schedule"])
+    v_array = np.zeros((selected_df_v["unit"].n_unique(), selected_df_v["schedule"].n_unique()))
+    for i in range(np.shape(v_array)[0]):
+        for j in range(np.shape(v_array)[1]):
+            if (unique_units[i], j) in rows_by_key:
+                v_array[i][j] = rows_by_key[(unique_units[i], j)][0]
+
+    selected_df_w = df.filter(pl.col("holding") == holding).select(["unit", "schedule", "stock_20"])
+    selected_df_w.group_by(["unit", "schedule"])
+    rows_by_key = selected_df_w.rows_by_key(key=["unit", "schedule"])
+    selected_df_key_w = df_key.select(["unit", "schedule", "treatment"])
+    selected_df_key_w.group_by(["unit", "schedule"])
+    rows_by_key_df_key = selected_df_key_w.rows_by_key(key=["unit", "schedule"])
+    w_array = np.zeros((selected_df_w["unit"].n_unique(), selected_df_w["schedule"].n_unique()))
+    for i in range(np.shape(w_array)[0]):
+        for j in range(np.shape(w_array)[1]):
+            if len(rows_by_key_df_key[(unique_units[i], j)]) == 0:
+                continue
+            if (unique_units[i], j) in rows_by_key:
+                w_array[i][j] = rows_by_key[(unique_units[i], j)][0]
+
+    """
+    selected_df_p = df.filter(pl.col("holding") == holding).select(
+        ["unit", "schedule", "harvest_value_period_2025", "harvest_value_period_2030", "harvest_value_period_2035"]
+    )
+    selected_df_p.group_by(["unit", "schedule"])
+    rows_by_key = selected_df_p.rows_by_key(key=["unit", "schedule"])
+    p_array = np.zeros((selected_df_p["unit"].n_unique(), selected_df_p["schedule"].n_unique()))
+    discounting = [0.95**5, 0.95**10, 0.95**15]
+    for i in range(np.shape(p_array)[0]):
+        for j in range(np.shape(p_array)[1]):
+            if (unique_units[i], j) in rows_by_key:
+                p_array[i][j] = (
+                    sum(x * y for x, y in zip(rows_by_key[(unique_units[i], j)][0], discounting, strict=True)) + 1e-6
+                )  # the 1E-6 is to deal with an annoying corner case, don't worry about it
+                v_array[i][j] += p_array[i][j]
+    """
+
+    selected_df_p1 = df.filter(pl.col("holding") == holding).select(["unit", "schedule", "harvest_value_5"])
+    selected_df_p1.group_by(["unit", "schedule"])
+    rows_by_key = selected_df_p1.rows_by_key(key=["unit", "schedule"])
+    p1_array = np.zeros((selected_df_p1["unit"].n_unique(), selected_df_p1["schedule"].n_unique()))
+    for i in range(np.shape(p1_array)[0]):
+        for j in range(np.shape(p1_array)[1]):
+            if (unique_units[i], j) in rows_by_key:
+                p1_array[i][j] = rows_by_key[(unique_units[i], j)][0] + 1e-6
+
+    selected_df_p2 = df.filter(pl.col("holding") == holding).select(["unit", "schedule", "harvest_value_10"])
+    selected_df_p2.group_by(["unit", "schedule"])
+    rows_by_key = selected_df_p2.rows_by_key(key=["unit", "schedule"])
+    p2_array = np.zeros((selected_df_p2["unit"].n_unique(), selected_df_p2["schedule"].n_unique()))
+    for i in range(np.shape(p2_array)[0]):
+        for j in range(np.shape(p2_array)[1]):
+            if (unique_units[i], j) in rows_by_key:
+                p2_array[i][j] = rows_by_key[(unique_units[i], j)][0] + 1e-6
+
+    selected_df_p3 = df.filter(pl.col("holding") == holding).select(["unit", "schedule", "harvest_value_20"])
+    selected_df_p3.group_by(["unit", "schedule"])
+    rows_by_key = selected_df_p3.rows_by_key(key=["unit", "schedule"])
+    p3_array = np.zeros((selected_df_p3["unit"].n_unique(), selected_df_p3["schedule"].n_unique()))
+    for i in range(np.shape(p3_array)[0]):
+        for j in range(np.shape(p3_array)[1]):
+            if (unique_units[i], j) in rows_by_key:
+                p3_array[i][j] = rows_by_key[(unique_units[i], j)][0] + 1e-6
+
+    constants = []
+    variables = []
+    constraints = []
+    f_1_func = []
+    f_2_func = []
+    p1_func = []
+    p2_func = []
+    p3_func = []
+    # define the constants V, W and P, decision variable X, constraints, and objective function expressions in one loop
+    for i in range(np.shape(v_array)[0]):
+        # Constants V, W and P
+        v = TensorConstant(
+            name=f"V_{i+1}",
+            symbol=f"V_{i+1}",
+            shape=[np.shape(v_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
+            values=v_array[i].tolist(),
+        )
+        constants.append(v)
+        w = TensorConstant(
+            name=f"W_{i+1}",
+            symbol=f"W_{i+1}",
+            shape=[np.shape(w_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
+            values=w_array[i].tolist(),
+        )
+        constants.append(w)
+        p1 = TensorConstant(
+            name=f"P1_{i+1}",
+            symbol=f"P1_{i+1}",
+            shape=[np.shape(p1_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
+            values=p1_array[i].tolist(),
+        )
+        constants.append(p1)
+
+        p2 = TensorConstant(
+            name=f"P2_{i+1}",
+            symbol=f"P2_{i+1}",
+            shape=[np.shape(p2_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
+            values=p2_array[i].tolist(),
+        )
+        constants.append(p2)
+
+        p3 = TensorConstant(
+            name=f"P3_{i+1}",
+            symbol=f"P3_{i+1}",
+            shape=[np.shape(p3_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
+            values=p3_array[i].tolist(),
+        )
+        constants.append(p3)
+
+        # Decision variable X
+        x = TensorVariable(
+            name=f"X_{i+1}",
+            symbol=f"X_{i+1}",
+            variable_type=VariableTypeEnum.binary,
+            shape=[np.shape(v_array)[1]],  # NOTE: vectors have to be of form [2] instead of [2,1] or [1,2]
+            lowerbounds=np.shape(v_array)[1] * [0],
+            upperbounds=np.shape(v_array)[1] * [1],
+            initial_values=np.shape(v_array)[1] * [0],
+        )
+        variables.append(x)
+
+        # Fill out the dict with information about treatments associated with X_{i+1}
+        treatment_list = (
+            df_key.filter((pl.col("holding") == holding) & (pl.col("unit") == unique_units[i]))
+            .get_column("treatment")
+            .to_list()
+        )
+        schedule_dict[f"X_{i+1}"] = dict(zip(range(len(treatment_list)), treatment_list, strict=True))
+        schedule_dict[f"X_{i+1}"]["unit"] = unique_units[i]
+
+        # Constraints
+        con = Constraint(
+            name=f"x_con_{i+1}",
+            symbol=f"x_con_{i+1}",
+            cons_type=ConstraintTypeEnum.EQ,
+            func=f"Sum(X_{i+1}) - 1",
+            is_twice_differentiable=True,
+        )
+        constraints.append(con)
+
+        # Objective function expressions
+        exprs = f"V_{i+1}@X_{i+1}"
+        f_1_func.append(exprs)
+
+        exprs = f"W_{i+1}@X_{i+1}"
+        f_2_func.append(exprs)
+
+        exprs = f"P1_{i+1}@X_{i+1}"
+        p1_func.append(exprs)
+
+        exprs = f"P2_{i+1}@X_{i+1}"
+        p2_func.append(exprs)
+
+        exprs = f"P3_{i+1}@X_{i+1}"
+        p3_func.append(exprs)
+
+    for i in range(1, 4):
+        pvar = Variable(name=f"P_{i}", symbol=f"P_{i}", variable_type=VariableTypeEnum.real, lowerbound=0)
+        variables.append(pvar)
+
+    vvar = Variable(name="V_end", symbol="V_end", variable_type=VariableTypeEnum.real, lowerbound=0)
+    variables.append(vvar)
+
+    # get the remainder value of the forest into decision variable V_end
+    v_func = "V_end - " + " - ".join(f_1_func)
+    con = Constraint(
+        name="v_con",
+        symbol="v_con",
+        cons_type=ConstraintTypeEnum.EQ,
+        func=v_func,
+        is_twice_differentiable=True,
+    )
+    constraints.append(con)
+
+    # These are here so that we can get the harvesting incomes into decision variables P_i
+    p1_func = "P_1 - " + " - ".join(p1_func)
+    con = Constraint(
+        name="p1_con",
+        symbol="p1_con",
+        cons_type=ConstraintTypeEnum.EQ,
+        func=p1_func,
+        is_twice_differentiable=True,
+    )
+    constraints.append(con)
+
+    p2_func = "P_2 - " + " - ".join(p2_func)
+    con = Constraint(
+        name="p2_con",
+        symbol="p2_con",
+        cons_type=ConstraintTypeEnum.EQ,
+        func=p2_func,
+        is_twice_differentiable=True,
+    )
+    constraints.append(con)
+
+    p3_func = "P_3 - " + " - ".join(p3_func)
+    con = Constraint(
+        name="p3_con",
+        symbol="p3_con",
+        cons_type=ConstraintTypeEnum.EQ,
+        func=p3_func,
+        is_twice_differentiable=True,
+    )
+    constraints.append(con)
+
+    # print(v_func)
+    # print(p1_func)
+    # print(p2_func)
+    # print(p3_func)
+
+    # form the objective function sums
+    f_2_func = " + ".join(f_2_func)
+    f_3_func = f"{discounting[0]} * P_1 + {discounting[1]} * P_2 + {discounting[2]} * P_3"
+    f_1_func = "V_end + " + f_3_func
+
+    # print(f_1_func)
+    # print(f_2_func)
+    # print(f_3_func)
+
+    f_1 = Objective(
+        name="Net present value",
+        symbol="f_1",
+        func=f_1_func,
+        maximize=True,
+        objective_type=ObjectiveTypeEnum.analytical,
+        is_linear=True,
+        is_convex=False,  # not checked
+        is_twice_differentiable=True,
+    )
+
+    f_2 = Objective(
+        name="Wood stock volume",
+        symbol="f_2",
+        func=f_2_func,
+        maximize=True,
+        objective_type=ObjectiveTypeEnum.analytical,
+        is_linear=True,
+        is_convex=False,  # not checked
+        is_twice_differentiable=True,
+    )
+
+    f_3 = Objective(
+        name="Harvest value",
+        symbol="f_3",
+        func=f_3_func,
+        maximize=True,
+        objective_type=ObjectiveTypeEnum.analytical,
+        is_linear=True,
+        is_convex=False,  # not checked
+        is_twice_differentiable=True,
+    )
+
+    # This is so bad, but we currently don't have a better way
+    ideals, nadirs = payoff_table_method(
+        problem=Problem(
+            name=problem_name,
+            description="A test forest problem.",
+            constants=constants,
+            variables=variables,
+            objectives=[f_1, f_2, f_3],
+            constraints=constraints,
+        ),
+        solver=available_solvers["gurobipy"],
+    )
+
+    print(ideals)
+    print(nadirs)
+
+    # Redefine the objectives with Finnish display names and the computed ideal and nadir values
+    f_1 = Objective(
+        name="Nettonykyarvo / €",  # "Net present value / €"
+        symbol="f_1",
+        func=f_1_func,
+        maximize=True,
+        ideal=math.ceil(ideals["f_1"]),
+        nadir=math.floor(nadirs["f_1"]),
+        objective_type=ObjectiveTypeEnum.analytical,
+        is_linear=True,
+        is_convex=False,  # not checked
+        is_twice_differentiable=True,
+    )
+
+    f_2 = Objective(
+        name=f"Puuston tilavuus / m^3\n(alussa {wood_volume_0}m^3)",  # "Wood stock volume / m^3 (initially ... m^3)"
+        symbol="f_2",
+        func=f_2_func,
+        maximize=True,
+        ideal=math.ceil(ideals["f_2"]),
+        nadir=math.floor(nadirs["f_2"]),
+        objective_type=ObjectiveTypeEnum.analytical,
+        is_linear=True,
+        is_convex=False,  # not checked
+        is_twice_differentiable=True,
+    )
+
+    f_3 = Objective(
+        name="Hakkuiden tuotto / €",  # "Harvest income / €"
+        symbol="f_3",
+        func=f_3_func,
+        maximize=True,
+        ideal=math.ceil(ideals["f_3"]),
+        nadir=math.floor(nadirs["f_3"]),
+        objective_type=ObjectiveTypeEnum.analytical,
+        is_linear=True,
+        is_convex=False,  # not checked
+        is_twice_differentiable=True,
+    )
+
+    return Problem(
+        name=problem_name,
+        description="A test forest problem.",
+        constants=constants,
+        variables=variables,
+        objectives=[f_1, f_2, f_3],
+        constraints=constraints,
+        is_twice_differentiable=True,
+    ), schedule_dict
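
For orientation, here is a minimal usage sketch of the new `utopia_problem` entry point; it is not part of the diff above. The CSV paths are hypothetical placeholders, the files are assumed to contain the columns the function reads (`holding`, `unit`, `schedule`, `stock_0`, `stock_20`, `npv_3_percent`, `harvest_value_5/10/20`, and `treatment` in the key file), and the payoff-table step requires the gurobipy solver to be installed.

```python
# Sketch only: file names are placeholders and must point to CSVs with the columns listed above.
from desdeo.utopia_stuff.utopia_problem import utopia_problem

problem, schedule_dict = utopia_problem(
    simulation_results="data/alternatives.csv",   # hypothetical simulation-results CSV
    treatment_key="data/alternatives_key.csv",    # hypothetical unit/schedule -> treatment key CSV
    problem_name="Holding 1 forest problem",
    holding=1,
)

# `problem` is a desdeo.problem.schema.Problem with objectives f_1..f_3 and one binary
# TensorVariable X_i per unit; `schedule_dict` maps each X_i to its unit and to the
# treatment behind every schedule index.
print(problem.name)
print(schedule_dict["X_1"])
```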