rtc-tools 2.5.2rc3__py3-none-any.whl → 2.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rtc-tools might be problematic.
- {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/METADATA +7 -7
- rtc_tools-2.6.0.dist-info/RECORD +50 -0
- {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/WHEEL +1 -1
- rtctools/__init__.py +2 -1
- rtctools/_internal/alias_tools.py +12 -10
- rtctools/_internal/caching.py +5 -3
- rtctools/_internal/casadi_helpers.py +11 -32
- rtctools/_internal/debug_check_helpers.py +1 -1
- rtctools/_version.py +3 -3
- rtctools/data/__init__.py +2 -2
- rtctools/data/csv.py +54 -33
- rtctools/data/interpolation/bspline.py +3 -3
- rtctools/data/interpolation/bspline1d.py +42 -29
- rtctools/data/interpolation/bspline2d.py +10 -4
- rtctools/data/netcdf.py +137 -93
- rtctools/data/pi.py +304 -210
- rtctools/data/rtc.py +64 -53
- rtctools/data/storage.py +91 -51
- rtctools/optimization/collocated_integrated_optimization_problem.py +1244 -696
- rtctools/optimization/control_tree_mixin.py +68 -66
- rtctools/optimization/csv_lookup_table_mixin.py +107 -74
- rtctools/optimization/csv_mixin.py +83 -52
- rtctools/optimization/goal_programming_mixin.py +239 -148
- rtctools/optimization/goal_programming_mixin_base.py +204 -111
- rtctools/optimization/homotopy_mixin.py +36 -27
- rtctools/optimization/initial_state_estimation_mixin.py +8 -8
- rtctools/optimization/io_mixin.py +48 -43
- rtctools/optimization/linearization_mixin.py +3 -1
- rtctools/optimization/linearized_order_goal_programming_mixin.py +57 -28
- rtctools/optimization/min_abs_goal_programming_mixin.py +72 -29
- rtctools/optimization/modelica_mixin.py +135 -81
- rtctools/optimization/netcdf_mixin.py +32 -18
- rtctools/optimization/optimization_problem.py +181 -127
- rtctools/optimization/pi_mixin.py +68 -36
- rtctools/optimization/planning_mixin.py +19 -0
- rtctools/optimization/single_pass_goal_programming_mixin.py +159 -112
- rtctools/optimization/timeseries.py +4 -6
- rtctools/rtctoolsapp.py +18 -18
- rtctools/simulation/csv_mixin.py +37 -30
- rtctools/simulation/io_mixin.py +9 -5
- rtctools/simulation/pi_mixin.py +62 -32
- rtctools/simulation/simulation_problem.py +471 -180
- rtctools/util.py +84 -56
- rtc_tools-2.5.2rc3.dist-info/RECORD +0 -49
- {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/COPYING.LESSER +0 -0
- {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/entry_points.txt +0 -0
- {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/top_level.txt +0 -0
rtctools/optimization/homotopy_mixin.py

@@ -30,16 +30,18 @@ class HomotopyMixin(OptimizationProblem):
         # Overwrite the seed only when the results of the latest run are
         # stored within this class. That is, when the GoalProgrammingMixin
         # class is not used or at the first run of the goal programming loop.
-        if self.__theta > options[
+        if self.__theta > options["theta_start"] and getattr(self, "_gp_first_run", True):
             for key, result in self.__results[ensemble_member].items():
                 times = self.times(key)
-                if (
-
+                if (result.ndim == 1 and len(result) == len(times)) or (
+                    result.ndim == 2 and result.shape[0] == len(times)
+                ):
                     # Only include seed timeseries which are consistent
                     # with the specified time stamps.
                     seed[key] = Timeseries(times, result)
-                elif (
-
+                elif (result.ndim == 1 and len(result) == 1) or (
+                    result.ndim == 2 and result.shape[0] == 1
+                ):
                     seed[key] = result
         return seed
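The shape check introduced in the hunk above decides whether a stored result is re-used as a seed timeseries or passed through as a single value. A small, self-contained numpy illustration of the accepted shapes (the arrays are made up for illustration):

```python
# Accepted seed shapes, mirroring the condition in the hunk above: 1-D with one
# value per time stamp or 2-D with one row per time stamp becomes a Timeseries
# seed; a single-entry array is used as-is.
import numpy as np

times = np.array([0.0, 3600.0, 7200.0])
candidates = {
    "per_timestep": np.array([1.0, 1.2, 1.1]),  # ndim == 1, len == len(times)
    "vectorized": np.zeros((3, 2)),             # ndim == 2, shape[0] == len(times)
    "single_value": np.array([0.5]),            # ndim == 1, len == 1
}

for name, result in candidates.items():
    as_timeseries = (result.ndim == 1 and len(result) == len(times)) or (
        result.ndim == 2 and result.shape[0] == len(times)
    )
    print(name, "-> Timeseries seed" if as_timeseries else "-> used as-is")
```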
@@ -52,7 +54,7 @@ class HomotopyMixin(OptimizationProblem):
             # to avoid accidental usage of the parameter value in e.g. pre().
             # Note that we use a try-except here instead of hasattr, to avoid
             # explicit name mangling.
-            parameters[options[
+            parameters[options["homotopy_parameter"]] = self.__theta
         except AttributeError:
             pass

@@ -74,21 +76,23 @@ class HomotopyMixin(OptimizationProblem):
         | ``homotopy_parameter``    | ``string`` | ``theta``     |
         +---------------------------+------------+---------------+

-        The homotopy process is controlled by the homotopy parameter in the model, specified
-
-        and increases to a value of ``1.0`` with a dynamically changing step size. This step
-
-
-
-
+        The homotopy process is controlled by the homotopy parameter in the model, specified by the
+        option ``homotopy_parameter``. The homotopy parameter is initialized to ``theta_start``,
+        and increases to a value of ``1.0`` with a dynamically changing step size. This step size
+        is initialized with the value of the option ``delta_theta_0``. If this step size is too
+        large, i.e., if the problem with the increased homotopy parameter fails to converge, the
+        step size is halved. The process of halving terminates when the step size falls below the
+        minimum value specified by the option ``delta_theta_min``.

         :returns: A dictionary of homotopy options.
         """

-        return {
-
-
-
+        return {
+            "theta_start": 0.0,
+            "delta_theta_0": 1.0,
+            "delta_theta_min": 0.01,
+            "homotopy_parameter": "theta",
+        }

     def dynamic_parameters(self):
         dynamic_parameters = super().dynamic_parameters()
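The docstring hunk above describes the continuation scheme: theta starts at ``theta_start``, is pushed towards ``1.0`` in steps of ``delta_theta``, and the step is halved after a failed solve until it drops below ``delta_theta_min``. A minimal stand-alone sketch of that stepping logic, using the default option values from ``homotopy_options()``; ``solve`` is a hypothetical callback standing in for one optimization run, not an rtc-tools function:

```python
# Stand-alone sketch of the stepping scheme described in the docstring above.
# `solve(theta)` is assumed to return True when the run with that theta converges.
def run_homotopy(solve, theta_start=0.0, delta_theta_0=1.0, delta_theta_min=0.01):
    delta_theta = delta_theta_0
    theta = theta_start
    while theta <= 1.0:
        if solve(theta):
            if theta == 1.0:
                return True                    # fully nonlinear problem solved
            theta = min(theta + delta_theta, 1.0)
        else:
            if theta == theta_start:
                return False                   # even the starting problem failed
            theta -= delta_theta               # step back to the last good theta
            delta_theta /= 2                   # and retry with a halved step
            if delta_theta < delta_theta_min:
                return False                   # step size below the allowed minimum
            theta += delta_theta
    return True
```

With the defaults, a failure at theta = 1.0 inserts intermediate solves at 0.5, 0.75, 0.875, and so on, until a run converges or the step drops below 0.01.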
@@ -97,7 +101,7 @@ class HomotopyMixin(OptimizationProblem):
             # For theta = 0, we don't mark the homotopy parameter as being dynamic,
             # so that the correct sparsity structure is obtained for the linear model.
             options = self.homotopy_options()
-            dynamic_parameters.append(self.variable(options[
+            dynamic_parameters.append(self.variable(options["homotopy_parameter"]))

         return dynamic_parameters

@@ -107,18 +111,22 @@ class HomotopyMixin(OptimizationProblem):
         self.pre()

         options = self.homotopy_options()
-        delta_theta = options[
+        delta_theta = options["delta_theta_0"]

         # Homotopy loop
-        self.__theta = options[
+        self.__theta = options["theta_start"]

         while self.__theta <= 1.0:
             logger.info("Solving with homotopy parameter theta = {}.".format(self.__theta))

-            success = super().optimize(
+            success = super().optimize(
+                preprocessing=False, postprocessing=False, log_solver_failure_as_error=False
+            )
             if success:
                 self.__results = [
-                    self.extract_results(ensemble_member)
+                    self.extract_results(ensemble_member)
+                    for ensemble_member in range(self.ensemble_size)
+                ]

                 if self.__theta == 0.0:
                     self.check_collocation_linearity = False

@@ -128,17 +136,18 @@ class HomotopyMixin(OptimizationProblem):
                     self.clear_transcription_cache()

             else:
-                if self.__theta == options[
+                if self.__theta == options["theta_start"]:
                     break

                 self.__theta -= delta_theta
                 delta_theta /= 2

-                if delta_theta < options[
+                if delta_theta < options["delta_theta_min"]:
                     failure_message = (
-
-
-
+                        "Solver failed with homotopy parameter theta = {}. Theta cannot "
+                        "be decreased further, as that would violate the minimum delta "
+                        "theta of {}.".format(self.__theta, options["delta_theta_min"])
+                    )
                     if log_solver_failure_as_error:
                         logger.error(failure_message)
                     else:
rtctools/optimization/initial_state_estimation_mixin.py

@@ -12,9 +12,9 @@ class _MeasurementGoal(Goal):

     def function(self, optimization_problem, ensemble_member):
         op = optimization_problem
-        return (
-
-
+        return op.state_at(self.__state, op.initial_time, ensemble_member) - op.timeseries_at(
+            self.__measurement_id, op.initial_time, ensemble_member
+        )

     order = 2
     priority = -2

@@ -29,9 +29,9 @@ class _SmoothingGoal(Goal):

     def function(self, optimization_problem, ensemble_member):
         op = optimization_problem
-        return (
-
-
+        return op.state_at(self.__state1, op.initial_time, ensemble_member) - op.state_at(
+            self.__state2, op.initial_time, ensemble_member
+        )

     order = 2
     priority = -1

@@ -61,8 +61,8 @@ class InitialStateEstimationMixin(GoalProgrammingMixin):

     def initial_state_measurements(self) -> List[Union[Tuple[str, str], Tuple[str, str, float]]]:
         """
-        List of pairs ``(state, measurement_id)`` or triples ``(state, measurement_id,
-        relating states to measurement time series IDs.
+        List of pairs ``(state, measurement_id)`` or triples ``(state, measurement_id,
+        max_deviation)``, relating states to measurement time series IDs.

         The default maximum deviation is ``1.0``.
         """
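The docstring above says the mixin consumes a list of ``(state, measurement_id)`` pairs or ``(state, measurement_id, max_deviation)`` triples. A hedged sketch of an override; the state and measurement names below are made up for illustration and are not part of rtc-tools:

```python
# Minimal sketch of overriding initial_state_measurements(); names are illustrative.
from typing import List, Tuple, Union

from rtctools.optimization.initial_state_estimation_mixin import InitialStateEstimationMixin


class ExampleEstimationProblem(InitialStateEstimationMixin):
    def initial_state_measurements(
        self,
    ) -> List[Union[Tuple[str, str], Tuple[str, str, float]]]:
        return [
            # Pair: uses the default maximum deviation of 1.0.
            ("storage.V", "measured_volume"),
            # Triple: explicit maximum deviation for this state.
            ("channel.H", "measured_level", 0.05),
        ]
```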
rtctools/optimization/io_mixin.py

@@ -4,7 +4,6 @@ import warnings
 from abc import ABCMeta, abstractmethod

 import casadi as ca
-
 import numpy as np

 from rtctools._internal.caching import cached

@@ -54,7 +53,7 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):

     @abstractmethod
     def write(self) -> None:
-        """
+        """
         Writes output data to files
         """
         pass

@@ -81,17 +80,17 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):
         return Timeseries(*self.io.get_timeseries_sec(variable, ensemble_member))

     def set_timeseries(
-
-
-
-
-
-
-
+        self,
+        variable: str,
+        timeseries: Timeseries,
+        ensemble_member: int = 0,
+        output: bool = True,
+        check_consistency: bool = True,
+    ):
         def stretch_values(values, t_pos):
             # Construct a values range with preceding and possibly following nans
             new_values = np.full(self.io.times_sec.shape, np.nan)
-            new_values[t_pos:t_pos + len(values)] = values
+            new_values[t_pos : t_pos + len(values)] = values
             return new_values

         if output:

@@ -99,9 +98,12 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):

         if isinstance(timeseries, Timeseries):
             if len(timeseries.values) != len(timeseries.times):
-                raise ValueError(
-
-
+                raise ValueError(
+                    "IOMixin: Trying to set timeseries {} with times and values that are of "
+                    "different length (lengths of {} and {}, respectively).".format(
+                        variable, len(timeseries.times), len(timeseries.values)
+                    )
+                )

             timeseries_times_sec = self.io.times_sec

@@ -109,10 +111,10 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):
             if check_consistency:
                 if not set(timeseries_times_sec).issuperset(timeseries.times):
                     raise ValueError(
-
-
-
-
+                        "IOMixin: Trying to set timeseries {} with different times "
+                        "(in seconds) than the imported timeseries. Please make sure the "
+                        "timeseries covers all timesteps of the longest "
+                        "imported timeseries.".format(variable)
                     )

             # Determine position of first times of added timeseries within the

@@ -131,15 +133,18 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):

             if check_consistency:
                 if len(self.times()) != len(timeseries):
-                    raise ValueError(
-
-
+                    raise ValueError(
+                        "IOMixin: Trying to set values for {} with a different "
+                        "length ({}) than the forecast length ({}).".format(
+                            variable, len(timeseries), len(self.times())
+                        )
+                    )
                 elif not set(timeseries_times_sec).issuperset(self.times()):
                     raise ValueError(
-
-
-
-
+                        "IOMixin: Trying to set timeseries {} with different times "
+                        "(in seconds) than the imported timeseries. Please make sure the "
+                        "timeseries covers all timesteps of the longest "
+                        "imported timeseries.".format(variable)
                     )

             # If times is not supplied with the timeseries, we add the
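The hunks above reformat ``IOMixin.set_timeseries()`` into an explicit keyword signature and expand its consistency checks. A hedged usage sketch; the subclass, its stub I/O methods, and the series name ``forecast_inflow`` are illustrative only, and a real problem would typically use a concrete I/O mixin such as ``CSVMixin`` together with a physics mixin:

```python
# Hedged usage sketch for the set_timeseries() signature shown above.
import numpy as np

from rtctools.optimization.io_mixin import IOMixin
from rtctools.optimization.timeseries import Timeseries


class ExampleProblem(IOMixin):
    # Minimal stubs for the abstract I/O hooks, just to make the sketch a valid class.
    def read(self) -> None:
        pass

    def write(self) -> None:
        pass

    def pre(self):
        super().pre()
        times = self.io.times_sec            # timestamps (seconds) of the imported data
        values = np.full(len(times), 1.5)    # made-up values
        # The times of the new series must be a subset of the imported times,
        # otherwise the consistency check in set_timeseries() raises ValueError.
        self.set_timeseries(
            "forecast_inflow",
            Timeseries(times, values),
            ensemble_member=0,
            output=True,
            check_consistency=True,
        )
```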
@@ -158,7 +163,7 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):

         :param variable: Variable name.
         """
-        return
+        return "_".join((variable, "Min"))

     def max_timeseries_id(self, variable: str) -> str:
         """

@@ -166,7 +171,7 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):

         :param variable: Variable name.
         """
-        return
+        return "_".join((variable, "Max"))

     @cached
     def bounds(self):

@@ -177,7 +182,7 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):
         t_pos = bisect.bisect_left(io_times, self.initial_time)

         # Load bounds from timeseries
-        for variable in self.dae_variables[
+        for variable in self.dae_variables["free_variables"]:
             variable_name = variable.name()

             m, M = None, None
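Per the ``min_timeseries_id``/``max_timeseries_id`` hunks above, ``bounds()`` iterates over the free variables and looks up bound series built from the variable name plus a ``_Min``/``_Max`` suffix. The example state name ``storage.V`` below is made up; the two helper functions simply restate the return expressions from the diff:

```python
# Naming convention implied by the hunks above: bounds for a free variable are
# read from imported series "<variable>_Min" and "<variable>_Max".
def min_timeseries_id(variable: str) -> str:
    return "_".join((variable, "Min"))


def max_timeseries_id(variable: str) -> str:
    return "_".join((variable, "Max"))


assert min_timeseries_id("storage.V") == "storage.V_Min"
assert max_timeseries_id("storage.V") == "storage.V_Max"
```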
@@ -222,17 +227,18 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):

         end_index = bisect.bisect_left(self.io.times_sec, self.initial_time) + 1

-        variable_list =
-        self.dae_variables[
+        variable_list = (
+            self.dae_variables["states"]
+            + self.dae_variables["algebraics"]
+            + self.dae_variables["control_inputs"]
+            + self.dae_variables["constant_inputs"]
+        )

         for variable in variable_list:
             variable = variable.name()
             try:
                 times, values = self.io.get_timeseries_sec(variable, ensemble_member)
-                history[variable] = Timeseries(
-                    times[:end_index],
-                    values[:end_index]
-                )
+                history[variable] = Timeseries(times[:end_index], values[:end_index])
             except KeyError:
                 pass
             else:

@@ -246,12 +252,10 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):
         seed = super().seed(ensemble_member)

         # Load seeds
-        for variable in self.dae_variables[
+        for variable in self.dae_variables["free_variables"]:
             variable = variable.name()
             try:
-                s = Timeseries(
-                    *self.io.get_timeseries_sec(variable, ensemble_member)
-                )
+                s = Timeseries(*self.io.get_timeseries_sec(variable, ensemble_member))
             except KeyError:
                 pass
             else:

@@ -279,12 +283,10 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):
         constant_inputs = super().constant_inputs(ensemble_member)

         # Load inputs from timeseries
-        for variable in self.dae_variables[
+        for variable in self.dae_variables["constant_inputs"]:
             variable = variable.name()
             try:
-                timeseries = Timeseries(
-                    *self.io.get_timeseries_sec(variable, ensemble_member)
-                )
+                timeseries = Timeseries(*self.io.get_timeseries_sec(variable, ensemble_member))
             except KeyError:
                 pass
             else:

@@ -310,6 +312,9 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):
         Deprecated, use `io.reference_datetime` and `io.datetimes`, or override behavior using
         :py:meth:`OptimizationProblem.times` and/or :py:attr:`OptimizationProblem.initial_time`.
         """
-        warnings.warn(
-
+        warnings.warn(
+            "get_forecast_index() is deprecated and will be removed in the future",
+            FutureWarning,
+            stacklevel=1,
+        )
         return bisect.bisect_left(self.io.datetimes, self.io.reference_datetime)
rtctools/optimization/linearization_mixin.py

@@ -19,7 +19,9 @@ class LinearizationMixin(OptimizationProblem):
         parameters = super().parameters(ensemble_member)

         for parameter, timeseries_id in self.linearization_parameters().items():
-            parameters[parameter] = self.timeseries_at(
+            parameters[parameter] = self.timeseries_at(
+                timeseries_id, self.initial_time, ensemble_member
+            )

         return parameters
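The hunk above fills each linearization parameter from the referenced timeseries value at the initial time. A hedged sketch of the mapping a problem would provide through ``linearization_parameters()``; the parameter and series names are illustrative, not part of rtc-tools:

```python
# Hedged sketch of the parameter-to-timeseries mapping consumed by
# LinearizationMixin.parameters() in the hunk above.
from rtctools.optimization.linearization_mixin import LinearizationMixin


class ExampleLinearizedProblem(LinearizationMixin):
    def linearization_parameters(self):
        # Each model parameter is set to the value of the referenced timeseries
        # at self.initial_time, for every ensemble member.
        return {
            "channel.friction_linearization_point": "measured_flow",
            "weir.head_linearization_point": "measured_level",
        }
```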
rtctools/optimization/linearized_order_goal_programming_mixin.py

@@ -1,14 +1,18 @@
 import casadi as ca
-
 import numpy as np

-from rtctools.optimization.goal_programming_mixin_base import
-
+from rtctools.optimization.goal_programming_mixin_base import (
+    Goal,
+    StateGoal,
+    _GoalConstraint,
+    _GoalProgrammingMixinBase,
+)


 class LinearizedOrderGoal(Goal):
     #: Override linearization of goal order. Related global goal programming
-    #: option is ``linearize_goal_order``
+    #: option is ``linearize_goal_order``
+    #: (see :py:meth:`LinearizedOrderGoalProgrammingMixin.goal_programming_options`).
     #: The default value of None defers to the global option, but the user can
     #: explicitly override it per goal by setting this value to True or False.
     linearize_order = None

@@ -17,7 +21,7 @@ class LinearizedOrderGoal(Goal):
     _linear_coefficients = {}

     @classmethod
-    def _get_linear_coefficients(cls, order, eps=0.1, kind=
+    def _get_linear_coefficients(cls, order, eps=0.1, kind="balanced"):
         assert order > 1, "Order should be strictly larger than one"

         try:

@@ -25,15 +29,15 @@ class LinearizedOrderGoal(Goal):
         except KeyError:
             pass

-        x = ca.SX.sym(
-        a = ca.SX.sym(
-        b = ca.SX.sym(
+        x = ca.SX.sym("x")
+        a = ca.SX.sym("a")
+        b = ca.SX.sym("b")

         # Strike a balance between "absolute error < eps" and "relative error < eps" by
         # multiplying eps with x**(order-1)
-        if kind ==
-            f = x**order - eps * x**(order-1) - (a * x + b)
-        elif kind ==
+        if kind == "balanced":
+            f = x**order - eps * x ** (order - 1) - (a * x + b)
+        elif kind == "abs":
             f = x**order - eps - (a * x + b)
         else:
             raise Exception("Unknown error approximation strategy '{}'".format(kind))

@@ -61,7 +65,7 @@ class LinearizedOrderGoal(Goal):
         xs = np.array(xs)
         ys = xs**order

-        a = (ys[1:] - ys[:-1])/(xs[1:] - xs[:-1])
+        a = (ys[1:] - ys[:-1]) / (xs[1:] - xs[:-1])
         b = ys[1:] - a * xs[1:]
         lines = list(zip(a, b))
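The ``_get_linear_coefficients`` hunks above build a set of secant lines ``y = a*x + b`` through breakpoints on ``x**order``, with breakpoints derived from the tolerance ``eps`` via the symbolic error expression ``f`` (the "abs" and "balanced" strategies). Because ``x**order`` is convex, the pointwise maximum of those lines bounds it from above, which is what the later ``lin - a * eps - b >= 0`` constraints rely on. A numpy-only sketch of the same construction using a fixed breakpoint grid instead of the tolerance-driven breakpoints rtc-tools computes:

```python
# Hedged sketch of the idea behind _get_linear_coefficients(): approximate
# x**order from above by the pointwise maximum of secant lines.
import numpy as np

order = 2
xs = np.linspace(0.0, 1.0, 6)                 # illustrative breakpoints on [0, 1]
ys = xs**order

a = (ys[1:] - ys[:-1]) / (xs[1:] - xs[:-1])   # secant slopes
b = ys[1:] - a * xs[1:]                       # secant intercepts
lines = list(zip(a, b))

x = np.linspace(0.0, 1.0, 201)
approx = np.max([a_i * x + b_i for a_i, b_i in lines], axis=0)  # piecewise-linear upper bound
max_abs_error = np.max(np.abs(x**order - approx))
print(lines)
print(max_abs_error)  # shrinks as more breakpoints are added
```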
@@ -76,6 +80,7 @@ class LinearizedOrderStateGoal(LinearizedOrderGoal, StateGoal):
     it is possible to just inherit from :py:class:`.LinearizedOrderGoal` to get the needed
     functionality for control of the linearization at goal level.
     """
+
     pass


@@ -98,7 +103,7 @@ class LinearizedOrderGoalProgrammingMixin(_GoalProgrammingMixinBase):
         :py:attr:`LinearizedOrderGoal.linearize_order`).
         """
         options = super().goal_programming_options()
-        options[
+        options["linearize_goal_order"] = True
         return options

     def _gp_validate_goals(self, goals, is_path_goal):
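``LinearizedOrderGoalProgrammingMixin`` switches ``linearize_goal_order`` on by default in the hunk above, while the ``linearize_order`` class attribute documented earlier lets a single goal opt out. A hedged sketch of such a goal; the class, state name, and numeric values are illustrative, and the remaining attributes follow the usual rtc-tools ``Goal`` conventions rather than anything introduced in this release:

```python
# Illustrative goal that keeps its exact quadratic order even though the mixin
# enables linearize_goal_order globally. Names and numbers are made up.
from rtctools.optimization.linearized_order_goal_programming_mixin import LinearizedOrderGoal


class LevelTargetGoal(LinearizedOrderGoal):
    linearize_order = False  # per-goal override of the global option

    function_range = (0.0, 2.0)
    target_min = 0.5
    target_max = 1.5
    order = 2
    priority = 1

    def function(self, optimization_problem, ensemble_member):
        # Same call pattern as the goals in initial_state_estimation_mixin.py above.
        return optimization_problem.state_at(
            "storage.H", optimization_problem.initial_time, ensemble_member
        )
```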
@@ -109,10 +114,12 @@ class LinearizedOrderGoalProgrammingMixin(_GoalProgrammingMixinBase):
             if isinstance(goal, LinearizedOrderGoal):
                 goal_linearize = goal.linearize_order

-            if goal_linearize or (options[
+            if goal_linearize or (options["linearize_goal_order"] and goal_linearize is not False):
                 if not goal.has_target_bounds and goal.order > 1:
-                    raise Exception(
-
+                    raise Exception(
+                        "Higher order minimization goals not allowed with "
+                        "`linearize_goal_order` for goal {}".format(goal)
+                    )

         super()._gp_validate_goals(goals, is_path_goal)

@@ -124,7 +131,7 @@ class LinearizedOrderGoalProgrammingMixin(_GoalProgrammingMixinBase):
             if isinstance(goal, LinearizedOrderGoal):
                 goal_linearize = goal.linearize_order

-            if goal_linearize or (options[
+            if goal_linearize or (options["linearize_goal_order"] and goal_linearize is not False):
                 if goal.order > 1 and not goal.critical:
                     return True
             else:

@@ -147,7 +154,9 @@ class LinearizedOrderGoalProgrammingMixin(_GoalProgrammingMixinBase):
                 # Make a linear epsilon, and constraints relating the linear
                 # variable to the original objective function
                 path_prefix = "path_" if is_path_goal else ""
-                linear_variable = ca.MX.sym(
+                linear_variable = ca.MX.sym(
+                    path_prefix + "lineps_{}_{}".format(sym_index, j), goal.size
+                )

                 lo_epsilons.append(linear_variable)

@@ -163,9 +172,17 @@ class LinearizedOrderGoalProgrammingMixin(_GoalProgrammingMixinBase):
                 # having `keep_soft_constraints` = False. This is because the `epsilon` and
                 # the `linear_variable` no longer exist in the next priority.
                 for ensemble_member in range(self.ensemble_size):
-
-
-
+
+                    def _f(
+                        problem,
+                        goal=goal,
+                        epsilon_name=epsilon_name,
+                        linear_variable=linear_variable,
+                        a=a,
+                        b=b,
+                        ensemble_member=ensemble_member,
+                        is_path_constraint=is_path_goal,
+                    ):
                         if is_path_constraint:
                             eps = problem.variable(epsilon_name)
                             lin = problem.variable(linear_variable.name())

@@ -175,18 +192,25 @@ class LinearizedOrderGoalProgrammingMixin(_GoalProgrammingMixinBase):

                         return lin - a * eps - b

-                    lo_soft_constraints[ensemble_member].append(
+                    lo_soft_constraints[ensemble_member].append(
+                        _GoalConstraint(goal, _f, 0.0, np.inf, False)
+                    )

-            if is_path_goal and options[
+            if is_path_goal and options["scale_by_problem_size"]:
                 goal_m, goal_M = self._gp_min_max_arrays(goal, target_shape=len(self.times()))
                 goal_active = np.isfinite(goal_m) | np.isfinite(goal_M)
                 n_active = np.sum(goal_active.astype(int), axis=0)
             else:
                 n_active = 1

-            def _objective_func(
-
-
+            def _objective_func(
+                problem,
+                ensemble_member,
+                goal=goal,
+                linear_variable=linear_variable,
+                is_path_goal=is_path_goal,
+                n_active=n_active,
+            ):
                 if is_path_goal:
                     lin = problem.variable(linear_variable.name())
                 else:

@@ -196,8 +220,13 @@ class LinearizedOrderGoalProgrammingMixin(_GoalProgrammingMixinBase):

             goal._objective_func = _objective_func

-
-
+        (
+            epsilons,
+            objectives,
+            soft_constraints,
+            hard_constraints,
+            extra_constants,
+        ) = super()._gp_goal_constraints(goals, sym_index, options, is_path_goal)

         epsilons = epsilons + lo_epsilons
         for ensemble_member in range(self.ensemble_size):