rtc-tools 2.7.3 (rtc_tools-2.7.3-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rtc_tools-2.7.3.dist-info/METADATA +53 -0
- rtc_tools-2.7.3.dist-info/RECORD +50 -0
- rtc_tools-2.7.3.dist-info/WHEEL +5 -0
- rtc_tools-2.7.3.dist-info/entry_points.txt +3 -0
- rtc_tools-2.7.3.dist-info/licenses/COPYING.LESSER +165 -0
- rtc_tools-2.7.3.dist-info/top_level.txt +1 -0
- rtctools/__init__.py +5 -0
- rtctools/_internal/__init__.py +0 -0
- rtctools/_internal/alias_tools.py +188 -0
- rtctools/_internal/caching.py +25 -0
- rtctools/_internal/casadi_helpers.py +99 -0
- rtctools/_internal/debug_check_helpers.py +41 -0
- rtctools/_version.py +21 -0
- rtctools/data/__init__.py +4 -0
- rtctools/data/csv.py +150 -0
- rtctools/data/interpolation/__init__.py +3 -0
- rtctools/data/interpolation/bspline.py +31 -0
- rtctools/data/interpolation/bspline1d.py +169 -0
- rtctools/data/interpolation/bspline2d.py +54 -0
- rtctools/data/netcdf.py +467 -0
- rtctools/data/pi.py +1236 -0
- rtctools/data/rtc.py +228 -0
- rtctools/data/storage.py +343 -0
- rtctools/optimization/__init__.py +0 -0
- rtctools/optimization/collocated_integrated_optimization_problem.py +3208 -0
- rtctools/optimization/control_tree_mixin.py +221 -0
- rtctools/optimization/csv_lookup_table_mixin.py +462 -0
- rtctools/optimization/csv_mixin.py +300 -0
- rtctools/optimization/goal_programming_mixin.py +769 -0
- rtctools/optimization/goal_programming_mixin_base.py +1094 -0
- rtctools/optimization/homotopy_mixin.py +165 -0
- rtctools/optimization/initial_state_estimation_mixin.py +89 -0
- rtctools/optimization/io_mixin.py +320 -0
- rtctools/optimization/linearization_mixin.py +33 -0
- rtctools/optimization/linearized_order_goal_programming_mixin.py +235 -0
- rtctools/optimization/min_abs_goal_programming_mixin.py +385 -0
- rtctools/optimization/modelica_mixin.py +482 -0
- rtctools/optimization/netcdf_mixin.py +177 -0
- rtctools/optimization/optimization_problem.py +1302 -0
- rtctools/optimization/pi_mixin.py +292 -0
- rtctools/optimization/planning_mixin.py +19 -0
- rtctools/optimization/single_pass_goal_programming_mixin.py +676 -0
- rtctools/optimization/timeseries.py +56 -0
- rtctools/rtctoolsapp.py +131 -0
- rtctools/simulation/__init__.py +0 -0
- rtctools/simulation/csv_mixin.py +171 -0
- rtctools/simulation/io_mixin.py +195 -0
- rtctools/simulation/pi_mixin.py +255 -0
- rtctools/simulation/simulation_problem.py +1293 -0
- rtctools/util.py +241 -0
rtctools/optimization/single_pass_goal_programming_mixin.py
@@ -0,0 +1,676 @@
+import itertools
+import logging
+from collections import OrderedDict
+from enum import Enum
+from typing import Dict, Union
+
+import casadi as ca
+import numpy as np
+
+from .goal_programming_mixin import GoalProgrammingMixin
+from .goal_programming_mixin_base import (  # noqa: F401
+    Goal,
+    StateGoal,
+    _EmptyEnsembleList,
+    _EmptyEnsembleOrderedDict,
+    _GoalProgrammingMixinBase,
+)
+from .timeseries import Timeseries
+
+logger = logging.getLogger("rtctools")
+
+
+class SinglePassMethod(Enum):
+    APPEND_CONSTRAINTS_OBJECTIVE = 1
+    UPDATE_OBJECTIVE_CONSTRAINT_BOUNDS = 2
+
+
+class SinglePassGoalProgrammingMixin(_GoalProgrammingMixinBase):
+    r"""
+    Adds lexicographic goal programming to your optimization problem.
+
+    Unlike :py:class:`.GoalProgrammingMixin`, this mixin will call
+    :py:meth:`.transcribe` only once per call to :py:meth:`.optimize`, and not
+    :math:`N` times for :math:`N` priorities. It works similar to how
+    `keep_soft_constraints = True` works for :py:class:`.GoalProgrammingMixin`,
+    while avoiding the repeated calls to transcribe the problem.
+
+    This mixin can work in one of two ways. What is shared between them is
+    that all violation variables of all goals are generated once at the
+    beginning, such that the state vector is exactly the same for all
+    priorities. They also share that all goal constraints are added from the
+    start. How they differ is in how they handle/append the constraints on the
+    objective of previous priorities:
+
+    1. At priority :math:`i` the constraints are the same as the ones at
+       priority :math:`i - 1` with the addition of the objective constraint
+       related to priority :math:`i - 1`. This is the default method.
+
+    2. All objective constraints are added at the start. The objective
+       constraints will have bound of :math:`[-\inf, \inf]` at the start, to be
+       updated after each priority finishes.
+
+    There is a special `qpsol` alternative available :py:class:`CachingQPSol`,
+    that will avoid recalculations on constraints that were already there in
+    previous priorities. This works for both options outlined above, because
+    the assumptions of :py:class:`CachingQPSol` are that:
+
+    1. The state vector does not change
+    2. Any new constraints are appended at the end
+
+    .. note::
+
+        Just like GoalProgrammingMixin, objective constraints are only added on
+        the goal objectives, not on any custom user objective.
+    """
+
+    single_pass_method = SinglePassMethod.APPEND_CONSTRAINTS_OBJECTIVE
+
+    def __init__(self, **kwargs):
+        # Call parent class first for default behaviour.
+        super().__init__(**kwargs)
+
+        # Initialize instance variables, so that the overridden methods may be
+        # called outside of the goal programming loop, for example in pre().
+        self._gp_first_run = True
+        self.__results_are_current = False
+
+        self.__constraint_store = _EmptyEnsembleOrderedDict()
+        self.__path_constraint_store = _EmptyEnsembleOrderedDict()
+
+        self.__problem_constraints = _EmptyEnsembleList()
+        self.__problem_path_constraints = _EmptyEnsembleList()
+        self.__problem_epsilons = []
+        self.__problem_path_epsilons = []
+        self.__problem_path_timeseries = []
+        self.__problem_parameters = []
+
+        self.__current_priority = 0
+        self.__original_constraints = None
+        self.__previous_constraints = None
+
+        self.__soft_constraints_per_priority = []
+        self.__path_soft_constraints_per_priority = []
+
+        self.__objectives_per_priority = []
+        self.__path_objectives_per_priority = []
+
+        if isinstance(self, GoalProgrammingMixin):
+            raise Exception(
+                "Cannot be an instance of both GoalProgrammingMixin "
+                "and SinglePassGoalProgrammingMixin"
+            )
+
+    @property
+    def extra_variables(self):
+        return self.__problem_epsilons
+
+    @property
+    def path_variables(self):
+        return self.__problem_path_epsilons
+
+    def bounds(self):
+        bounds = super().bounds()
+        for epsilon in self.__problem_epsilons + self.__problem_path_epsilons:
+            bounds[epsilon.name()] = (0.0, 1.0)
+        return bounds
+
+    def constant_inputs(self, ensemble_member):
+        constant_inputs = super().constant_inputs(ensemble_member)
+
+        n_times = len(self.times())
+
+        # Append min/max timeseries to the constant inputs. Note that min/max
+        # timeseries are shared between all ensemble members.
+        for variable, value in self.__problem_path_timeseries:
+            if isinstance(value, np.ndarray):
+                value = Timeseries(self.times(), np.broadcast_to(value, (n_times, len(value))))
+            elif not isinstance(value, Timeseries):
+                value = Timeseries(self.times(), np.full(n_times, value))
+
+            constant_inputs[variable] = value
+        return constant_inputs
+
+    def parameters(self, ensemble_member):
+        parameters = super().parameters(ensemble_member)
+
+        # Append min/max values to the parameters. Note that min/max values
+        # are shared between all ensemble members.
+        for variable, value in self.__problem_parameters:
+            parameters[variable] = value
+
+        return parameters
+
+    def seed(self, ensemble_member):
+        assert self._gp_first_run
+
+        seed = super().seed(ensemble_member)
+
+        # Seed epsilons of current priority
+        for epsilon in self.__problem_epsilons:
+            eps_size = epsilon.size1()
+            if eps_size > 1:
+                seed[epsilon.name()] = np.ones(eps_size)
+            else:
+                seed[epsilon.name()] = 1.0
+
+        times = self.times()
+        for epsilon in self.__problem_path_epsilons:
+            eps_size = epsilon.size1()
+            if eps_size > 1:
+                seed[epsilon.name()] = Timeseries(times, np.ones((eps_size, len(times))))
+            else:
+                seed[epsilon.name()] = Timeseries(times, np.ones(len(times)))
+
+        return seed
+
+    def constraints(self, ensemble_member):
+        constraints = super().constraints(ensemble_member)
+
+        additional_constraints = itertools.chain(
+            self.__constraint_store[ensemble_member].values(),
+            self.__problem_constraints[ensemble_member],
+        )
+
+        for constraint in additional_constraints:
+            constraints.append((constraint.function(self), constraint.min, constraint.max))
+
+        return constraints
+
+    def path_constraints(self, ensemble_member):
+        path_constraints = super().path_constraints(ensemble_member)
+
+        additional_path_constraints = itertools.chain(
+            self.__path_constraint_store[ensemble_member].values(),
+            self.__problem_path_constraints[ensemble_member],
+        )
+
+        for constraint in additional_path_constraints:
+            path_constraints.append((constraint.function(self), constraint.min, constraint.max))
+
+        return path_constraints
+
+    def solver_options(self):
+        # TODO: Split off into private
+
+        # Call parent
+        options = super().solver_options()
+
+        solver = options["solver"]
+        assert solver in ["bonmin", "ipopt"]
+
+        # Make sure constant states, such as min/max timeseries for violation variables,
+        # are turned into parameters for the final optimization problem.
+        ipopt_options = options[solver]
+        ipopt_options["fixed_variable_treatment"] = "make_parameter"
+
+        # Define temporary variable to avoid infinite loop between
+        # solver_options and goal_programming_options.
+        self._loop_breaker_solver_options = True
+
+        if not hasattr(self, "_loop_breaker_goal_programming_options"):
+            if not self.goal_programming_options()["mu_reinit"]:
+                ipopt_options["mu_strategy"] = "monotone"
+                if not self._gp_first_run:
+                    ipopt_options["mu_init"] = self.solver_stats["iterations"]["mu"][-1]
+
+        delattr(self, "_loop_breaker_solver_options")
+
+        return options
+
+    def goal_programming_options(self) -> Dict[str, Union[float, bool]]:
+        """
+        Returns a dictionary of options controlling the goal programming process.
+
+        +---------------------------+-----------+---------------+
+        | Option                    | Type      | Default value |
+        +===========================+===========+===============+
+        | ``constraint_relaxation`` | ``float`` | ``0.0``       |
+        +---------------------------+-----------+---------------+
+        | ``mu_reinit``             | ``bool``  | ``True``      |
+        +---------------------------+-----------+---------------+
+        | ``fix_minimized_values``  | ``bool``  | ``True/False``|
+        +---------------------------+-----------+---------------+
+        | ``check_monotonicity``    | ``bool``  | ``True``      |
+        +---------------------------+-----------+---------------+
+        | ``equality_threshold``    | ``float`` | ``1e-8``      |
+        +---------------------------+-----------+---------------+
+        | ``scale_by_problem_size`` | ``bool``  | ``False``     |
+        +---------------------------+-----------+---------------+
+
+        When a priority's objective is turned into a hard constraint,
+        the constraint is relaxed with ``constraint_relaxation``. Use of this option is
+        normally not required. Note that:
+
+        When using the default solver (IPOPT), its barrier parameter ``mu`` is
+        normally re-initialized at every iteration of the goal programming
+        algorithm, unless mu_reinit is set to ``False``. Use of this option
+        is normally not required.
+
+        If ``fix_minimized_values`` is set to ``True``, goal functions will be set to equal their
+        optimized values in optimization problems generated during subsequent priorities. Otherwise,
+        only an upper bound will be set. Use of this option is normally not required.
+        Note that the use of this option may add non-convex constraints to the optimization
+        problem. The default value for this parameter is ``True`` for the default solvers
+        IPOPT/BONMIN. If any other solver is used, the default value is ``False``.
+
+        If ``check_monotonicity`` is set to ``True``, then it will be checked whether goals with
+        the same function key form a monotonically decreasing sequence with regards to the target
+        interval.
+
+        The option ``equality_threshold`` controls when a two-sided inequality constraint is folded
+        into an equality constraint.
+
+        If ``scale_by_problem_size`` is set to ``True``, the objective (i.e. the sum of the
+        violation variables) will be divided by the number of goals, and the path objective will
+        be divided by the number of path goals and the number of active time steps (per goal).
+        This will make sure the objectives are always in the range [0, 1], at the cost of solving
+        each goal/time step less accurately.
+
+        :returns: A dictionary of goal programming options.
+        """
+
+        options = {}
+
+        options["mu_reinit"] = True
+        options["constraint_relaxation"] = 0.0  # Disable by default
+        options["fix_minimized_values"] = False
+        options["check_monotonicity"] = True
+        options["equality_threshold"] = 1e-8
+        options["scale_by_problem_size"] = False
+
+        # Forced options to be able to re-use GoalProgrammingMixin's
+        # GoalProgrammingMixin._gp_* functions. These are not relevant for
+        # SinglePassGoalProgrammingMixin, or should be set to a certain value
+        # for it to make sense.
+        options["violation_relaxation"] = 0.0  # Disable by default
+        options["violation_tolerance"] = np.inf  # Disable by default
+        options["interior_distance"] = 1e-6
+        options["keep_soft_constraints"] = True
+
+        # Define temporary variable to avoid infinite loop between
+        # solver_options and goal_programming_options.
+        self._loop_breaker_goal_programming_options = True
+
+        if not hasattr(self, "_loop_breaker_solver_options"):
+            if self.solver_options()["solver"] in {"ipopt", "bonmin"}:
+                options["fix_minimized_values"] = True
+
+        delattr(self, "_loop_breaker_goal_programming_options")
+
+        return options
+
+    def optimize(self, preprocessing=True, postprocessing=True, log_solver_failure_as_error=True):
+        # Do pre-processing
+        if preprocessing:
+            self.pre()
+
+        # Group goals into subproblems
+        subproblems = []
+        goals = self.goals()
+        path_goals = self.path_goals()
+
+        options = self.goal_programming_options()
+
+        # Validate goal definitions
+        self._gp_validate_goals(goals, is_path_goal=False)
+        self._gp_validate_goals(path_goals, is_path_goal=True)
+
+        priorities = sorted(
+            {int(goal.priority) for goal in itertools.chain(goals, path_goals) if not goal.is_empty}
+        )
+
+        for priority in priorities:
+            subproblems.append(
+                (
+                    priority,
+                    [
+                        goal
+                        for goal in goals
+                        if int(goal.priority) == priority and not goal.is_empty
+                    ],
+                    [
+                        goal
+                        for goal in path_goals
+                        if int(goal.priority) == priority and not goal.is_empty
+                    ],
+                )
+            )
+
+        # Solve the subproblems one by one
+        logger.info("Starting goal programming")
+
+        success = False
+
+        self.__constraint_store = [OrderedDict() for ensemble_member in range(self.ensemble_size)]
+        self.__path_constraint_store = [
+            OrderedDict() for ensemble_member in range(self.ensemble_size)
+        ]
+
+        self.__problem_constraints = [[] for ensemble_member in range(self.ensemble_size)]
+        self.__problem_path_constraints = [[] for ensemble_member in range(self.ensemble_size)]
+
+        self.__problem_epsilons = []
+        self.__problem_parameters = []
+        self.__problem_path_epsilons = []
+        self.__problem_path_timeseries = []
+
+        self._gp_first_run = True
+        self.__results_are_current = False
+
+        self.__current_priority = 0
+        self.__original_constraints = None
+
+        self.__objectives_per_priority = []
+        self.__path_objectives_per_priority = []
+
+        self.__additional_constraints = []
+        self.__objectives = []
+
+        for i, (_, goals, path_goals) in enumerate(subproblems):
+            (
+                subproblem_epsilons,
+                subproblem_objectives,
+                subproblem_soft_constraints,
+                hard_constraints,
+                subproblem_parameters,
+            ) = self._gp_goal_constraints(goals, i, options, is_path_goal=False)
+
+            (
+                subproblem_path_epsilons,
+                subproblem_path_objectives,
+                subproblem_path_soft_constraints,
+                path_hard_constraints,
+                subproblem_path_timeseries,
+            ) = self._gp_goal_constraints(path_goals, i, options, is_path_goal=True)
+
+            # Put hard constraints in the constraint stores
+            self._gp_update_constraint_store(self.__constraint_store, hard_constraints)
+            self._gp_update_constraint_store(self.__path_constraint_store, path_hard_constraints)
+
+            # Append new variables, parameters, timeseries and constraints to
+            # their respective lists
+            self.__problem_epsilons.extend(subproblem_epsilons)
+            self.__problem_path_epsilons.extend(subproblem_path_epsilons)
+
+            self.__problem_parameters.extend(subproblem_parameters)
+            self.__problem_path_timeseries.extend(subproblem_path_timeseries)
+
+            for ensemble_member in range(self.ensemble_size):
+                self.__problem_constraints[ensemble_member].extend(
+                    subproblem_soft_constraints[ensemble_member]
+                )
+                self.__problem_path_constraints[ensemble_member].extend(
+                    subproblem_path_soft_constraints[ensemble_member]
+                )
+
+            self.__objectives_per_priority.append(subproblem_objectives)
+            self.__path_objectives_per_priority.append(subproblem_path_objectives)
+
+        for priority in priorities:
+            logger.info("Solving goals at priority {}".format(priority))
+
+            # Call the pre priority hook
+            self.priority_started(priority)
+
+            # Solve subproblem
+            success = super().optimize(
+                preprocessing=False,
+                postprocessing=False,
+                log_solver_failure_as_error=log_solver_failure_as_error,
+            )
+            if not success:
+                break
+
+            self._gp_first_run = False
+
+            # To match GoalProgrammingMixin's behavior of applying the
+            # constraint_relaxation value at priority 2 on the objective of
+            # priority 2 (and not that of priority 1), we have to store the
+            # two relevant options here for later use.
+            options = self.goal_programming_options()
+            self.__objective_constraint_options = {
+                k: v
+                for k, v in options.items()
+                if k in {"fix_minimized_values", "constraint_relaxation"}
+            }
+
+            # Store results. Do this here, to make sure we have results even
+            # if a subsequent priority fails.
+            self.__results_are_current = False
+            self.__results = [
+                self.extract_results(ensemble_member)
+                for ensemble_member in range(self.ensemble_size)
+            ]
+            self.__results_are_current = True
+
+            # Call the post priority hook, so that intermediate results can be
+            # logged/inspected.
+            self.priority_completed(priority)
+
+            self.__current_priority += 1
+
+        logger.info("Done goal programming")
+
+        # Do post-processing
+        if postprocessing:
+            self.post()
+
+        # Done
+        return success
+
+    def transcribe(self):
+        def _objective_func(subproblem_objectives, subproblem_path_objectives):
+            val = 0.0
+            for ensemble_member in range(self.ensemble_size):
+                n_objectives = self._gp_n_objectives(
+                    subproblem_objectives, subproblem_path_objectives, ensemble_member
+                )
+                expr = self._gp_objective(subproblem_objectives, n_objectives, ensemble_member)
+                expr += ca.sum1(
+                    self.map_path_expression(
+                        self._gp_path_objective(
+                            subproblem_path_objectives, n_objectives, ensemble_member
+                        ),
+                        ensemble_member,
+                    )
+                )
+                val += self.ensemble_member_probability(ensemble_member) * expr
+
+            return val
+
+        if self._gp_first_run:
+            discrete, lbx, ubx, lbg, ubg, x0, nlp = super().transcribe()
+            self.__original_transcribe = (discrete, lbx, ubx, lbg, ubg, x0, nlp)
+
+            self.__additional_constraints = []
+            self.__objectives = []
+
+            # Objectives
+            for subproblem_objectives, subproblem_path_objectives in zip(
+                self.__objectives_per_priority, self.__path_objectives_per_priority
+            ):
+                self.__objectives.append(
+                    _objective_func(subproblem_objectives, subproblem_path_objectives)
+                )
+
+            if self.single_pass_method == SinglePassMethod.UPDATE_OBJECTIVE_CONSTRAINT_BOUNDS:
+                # The objectives are also directly added as constraints
+                constraints = [(objective, -np.inf, np.inf) for objective in self.__objectives]
+                self.__additional_constraints.extend(constraints)
+
+        # Add constraint on the objective of previous priority
+        if self.__current_priority > 0:
+            options = self.__objective_constraint_options
+
+            previous_objective = self.__objectives[self.__current_priority - 1]
+            f = ca.Function("tmp", [self.solver_input], [previous_objective])
+            obj_val = float(f(self.solver_output))
+
+            if options["fix_minimized_values"]:
+                lb, ub = obj_val, obj_val
+                self.linear_collocation = False  # Disable solver option jac_c_constant for IPOPT
+            else:
+                obj_val += options["constraint_relaxation"]
+                lb, ub = -np.inf, obj_val
+
+            if self.single_pass_method == SinglePassMethod.APPEND_CONSTRAINTS_OBJECTIVE:
+                self.__additional_constraints.append(
+                    (self.__objectives[self.__current_priority - 1], lb, ub)
+                )
+            elif self.single_pass_method == SinglePassMethod.UPDATE_OBJECTIVE_CONSTRAINT_BOUNDS:
+                ind = self.__current_priority - 1
+                constraint = self.__additional_constraints[ind]
+                self.__additional_constraints[ind] = (constraint[0], lb, ub)
+
+        # Update the NLP
+        discrete, lbx, ubx, lbg, ubg, x0, nlp = self.__original_transcribe
+        nlp = nlp.copy()
+
+        if self.__additional_constraints:
+            g_extra, lbg_extra, ubg_extra = zip(*self.__additional_constraints)
+
+            g = ca.vertcat(nlp["g"], *g_extra)
+            lbg = [*lbg.copy(), *lbg_extra]
+            ubg = [*ubg.copy(), *ubg_extra]
+
+            nlp["g"] = g
+
+        nlp["f"] = self.__objectives[self.__current_priority]
+
+        if not self._gp_first_run:
+            x0 = self.solver_output.copy()
+
+        return discrete, lbx, ubx, lbg, ubg, x0, nlp
+
+    def extract_results(self, ensemble_member=0):
+        if self.__results_are_current:
+            logger.debug("Returning cached results")
+            return self.__results[ensemble_member]
+
+        # If self.__results is not up to date, do the super().extract_results
+        # method
+        return super().extract_results(ensemble_member)
+
+
+class CachingQPSol:
+    """
+    Alternative to :py:func:`ca.qpsol` that caches the Jacobian between calls.
+
+    Typical usage would be something like:
+
+    .. code-block::
+
+        def pre(self):
+            self._qpsol = CachingQPSol()
+            super().pre()
+
+        def solver_options():
+            options = super().solver_options()
+            options['casadi_solver'] = self._qpsol
+            return options
+    """
+
+    def __init__(self):
+        self._tlcache = {}
+
+    def __call__(self, name, solver_name, nlp, options):
+        class Solver:
+            def __init__(
+                self, nlp=nlp, solver_name=solver_name, options=options, cache=self._tlcache
+            ):
+                x = nlp["x"]
+                f = nlp["f"]
+                g = nlp["g"]
+
+                if isinstance(x, ca.MX):
+                    # Can only convert SX to DM
+                    x = ca.SX.sym("X", *x.shape)
+                    x_mx = nlp["x"]
+                    expand = True
+                else:
+                    x_mx = None
+                    expand = False
+
+                if expand:
+                    expand_f = ca.Function("f", [x_mx], [f]).expand()
+                    f = expand_f(x)
+
+                # Gradient of the objective: gf == Hx + g
+                gf = ca.gradient(f, x)
+
+                # Identify the linear term in the objective
+                c = ca.substitute(gf, x, ca.DM.zeros(x.sparsity()))
+
+                # Identify the quadratic term in the objective
+                H = 0.5 * ca.jacobian(gf, x, {"symmetric": True})
+
+                if cache:
+                    if not x.size1() == cache["A"].size2():
+                        raise Exception(
+                            "Number of variables {} does not match "
+                            "cached constraint matrix dimensions {}".format(
+                                x.size1(), cache["A"].shape
+                            )
+                        )
+
+                    n_g_cache = cache["A"].size1()
+                    n_g = g.size1()
+
+                    if n_g_cache == n_g:
+                        b = cache["b"]
+                        A = cache["A"]
+                    else:
+                        g_new = g[n_g_cache:]
+
+                        if expand:
+                            expand_g_new = ca.Function("f", [x_mx], [g_new]).expand()
+                            g_new = expand_g_new(x)
+
+                        # Identify the constant term in the constraints
+                        b = ca.vertcat(
+                            cache["b"], ca.substitute(g_new, x, ca.DM.zeros(x.sparsity()))
+                        )
+
+                        # Identify the linear term in the constraints
+                        A = ca.vertcat(cache["A"], ca.jacobian(g_new, x))
+                else:
+                    if expand:
+                        expand_g = ca.Function("f", [x_mx], [g]).expand()
+                        g = expand_g(x)
+
+                    # Identify the constant term in the constraints
+                    b = ca.substitute(g, x, ca.DM.zeros(x.sparsity()))
+
+                    # Identify the linear term in the constraints
+                    A = ca.jacobian(g, x)
+
+                cache["A"] = A
+                cache["b"] = b
+
+                self._solver = ca.conic(
+                    "mysolver", solver_name, {"h": H.sparsity(), "a": A.sparsity()}, options
+                )
+                self._solver_in = {}
+                self._solver_in["h"] = ca.DM(H)
+                self._solver_in["g"] = ca.DM(c)
+                self._solver_in["a"] = ca.DM(A)
+                self._b = ca.DM(b)
+
+            def __call__(self, x0, lbx, ubx, lbg, ubg):
+                self._solver_in["x0"] = x0
+                self._solver_in["lbx"] = lbx
+                self._solver_in["ubx"] = ubx
+                self._solver_in["lba"] = lbg - self._b
+                self._solver_in["uba"] = ubg - self._b
+
+                solver_out = self._solver(**self._solver_in)
+
+                solver_out["f"] = solver_out["cost"]
+
+                return solver_out
+
+            def stats(self):
+                return self._solver.stats().copy()
+
+        return Solver()
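
For orientation, the sketch below shows how the pieces in this file are typically wired together: a problem class mixes in SinglePassGoalProgrammingMixin (instead of GoalProgrammingMixin), optionally switches single_pass_method to UPDATE_OBJECTIVE_CONSTRAINT_BOUNDS, and routes solves through a CachingQPSol instance via solver_options(), as the CachingQPSol docstring above suggests. This is a hypothetical, minimal sketch: the base classes (ModelicaMixin, CollocatedIntegratedOptimizationProblem), the goal definition, and the state name are illustrative assumptions and are not part of this diff.

# Hypothetical usage sketch; not part of the diff above. Only the
# SinglePassGoalProgrammingMixin / SinglePassMethod / CachingQPSol usage follows
# this module; the base classes, goal, and state name are illustrative.
from rtctools.optimization.collocated_integrated_optimization_problem import (
    CollocatedIntegratedOptimizationProblem,
)
from rtctools.optimization.goal_programming_mixin_base import StateGoal
from rtctools.optimization.modelica_mixin import ModelicaMixin
from rtctools.optimization.single_pass_goal_programming_mixin import (
    CachingQPSol,
    SinglePassGoalProgrammingMixin,
    SinglePassMethod,
)


class WaterLevelRangeGoal(StateGoal):
    # Illustrative path goal: keep a (hypothetical) model state within a band.
    state = "storage.HQ.H"
    target_min = 0.43
    target_max = 0.44
    priority = 1


class Example(
    SinglePassGoalProgrammingMixin,
    ModelicaMixin,
    CollocatedIntegratedOptimizationProblem,
):
    # Optional: update the bounds of pre-added objective constraints instead of
    # appending one objective constraint per finished priority (the default is
    # SinglePassMethod.APPEND_CONSTRAINTS_OBJECTIVE).
    single_pass_method = SinglePassMethod.UPDATE_OBJECTIVE_CONSTRAINT_BOUNDS

    def pre(self):
        # Create the caching QP solver once, before the goal programming loop,
        # as shown in the CachingQPSol docstring.
        self._qpsol = CachingQPSol()
        super().pre()

    def path_goals(self):
        return [WaterLevelRangeGoal(self)]

    def solver_options(self):
        options = super().solver_options()
        # Route solves through CachingQPSol so constraint rows that were already
        # present at earlier priorities are not re-linearized.
        options["casadi_solver"] = self._qpsol
        return options

Because the state vector is built once and objective constraints are only appended (or have their bounds updated) as priorities finish, the two CachingQPSol assumptions listed in the class docstring hold for either single-pass method.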