sleipnirgroup-jormungandr 0.2.1.dev11__cp312-abi3-manylinux_2_39_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sleipnirgroup-jormungandr might be problematic. Click here for more details.
- jormungandr/__init__.py +8 -0
- jormungandr/__init__.pyi +1 -0
- jormungandr/_jormungandr.abi3.so +0 -0
- jormungandr/autodiff/__init__.py +58 -0
- jormungandr/autodiff/__init__.pyi +2528 -0
- jormungandr/optimization/__init__.py +39 -0
- jormungandr/optimization/__init__.pyi +674 -0
- jormungandr/py.typed +0 -0
- sleipnirgroup_jormungandr-0.2.1.dev11.dist-info/LICENSE.txt +11 -0
- sleipnirgroup_jormungandr-0.2.1.dev11.dist-info/METADATA +365 -0
- sleipnirgroup_jormungandr-0.2.1.dev11.dist-info/RECORD +13 -0
- sleipnirgroup_jormungandr-0.2.1.dev11.dist-info/WHEEL +4 -0
- sleipnirgroup_jormungandr-0.2.1.dev11.dist-info/entry_points.txt +0 -0
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import concurrent.futures
|
|
2
|
+
|
|
3
|
+
from .._jormungandr.optimization import *
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def multistart(solve, initial_guesses):
    """
    Solves an optimization problem from different starting points in parallel,
    then returns the solution with the lowest cost.

    Each solve is performed on a separate thread. Solutions from successful
    solves are always preferred over solutions from unsuccessful solves, and
    cost (lower is better) is the tiebreaker between successful solves.

    Parameter ``solve``:
        A user-provided function that takes a decision variable initial guess
        and returns a MultistartResult.

    Parameter ``initial_guesses``:
        A list of decision variable initial guesses to try.
    """
    with concurrent.futures.ThreadPoolExecutor(
        max_workers=len(initial_guesses)
    ) as pool:
        # Kick off one solve per initial guess.
        pending = [pool.submit(solve, guess) for guess in initial_guesses]

        # Gather every result as it finishes.
        outcomes = []
        for done in concurrent.futures.as_completed(pending):
            outcomes.append(done.result())

    def rank(result):
        # Unsuccessful solves sort after successful ones (False < True);
        # among equally successful solves, the lower cost wins.
        return (result[0] != ExitStatus.SUCCESS, result[1])

    return min(outcomes, key=rank)
|
|
@@ -0,0 +1,674 @@
|
|
|
1
|
+
from collections.abc import Callable, Sequence
|
|
2
|
+
import datetime
|
|
3
|
+
import enum
|
|
4
|
+
from typing import Annotated, overload
|
|
5
|
+
|
|
6
|
+
import numpy
|
|
7
|
+
from numpy.typing import NDArray
|
|
8
|
+
import scipy
|
|
9
|
+
|
|
10
|
+
import _jormungandr.autodiff
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class EqualityConstraints:
    """
    A vector of equality constraints of the form cₑ(x) = 0.

    Template parameter ``Scalar``:
        Scalar type.
    """

    def __init__(self, equality_constraints: Sequence[EqualityConstraints]) -> None:
        """
        Concatenates multiple equality constraints.

        This overload is for Python bindings only.

        Parameter ``equality_constraints``:
            The list of EqualityConstraints to concatenate.
        """

    def __bool__(self) -> bool:
        """Implicit conversion operator to bool."""
|
|
33
|
+
|
|
34
|
+
class InequalityConstraints:
    """
    A vector of inequality constraints of the form cᵢ(x) ≥ 0.

    Template parameter ``Scalar``:
        Scalar type.
    """

    def __init__(self, inequality_constraints: Sequence[InequalityConstraints]) -> None:
        """
        Concatenates multiple inequality constraints.

        This overload is for Python bindings only.

        Parameter ``inequality_constraints``:
            The list of InequalityConstraints to concatenate.
        """

    def __bool__(self) -> bool:
        """Implicit conversion operator to bool."""
|
|
54
|
+
|
|
55
|
+
class ExitStatus(enum.Enum):
    """Solver exit status. Negative values indicate failure."""

    SUCCESS = 0
    """Solved the problem to the desired tolerance."""

    CALLBACK_REQUESTED_STOP = 1
    """
    The solver returned its solution so far after the user requested a
    stop.
    """

    TOO_FEW_DOFS = -1
    """The solver determined the problem to be overconstrained and gave up."""

    LOCALLY_INFEASIBLE = -2
    """
    The solver determined the problem to be locally infeasible and gave
    up.
    """

    GLOBALLY_INFEASIBLE = -3
    """
    The problem setup frontend determined the problem to have an empty
    feasible region.
    """

    FACTORIZATION_FAILED = -4
    """The linear system factorization failed."""

    LINE_SEARCH_FAILED = -5
    """
    The backtracking line search failed, and the problem isn't locally
    infeasible.
    """

    NONFINITE_INITIAL_COST_OR_CONSTRAINTS = -6
    """
    The solver encountered nonfinite initial cost or constraints and gave
    up.
    """

    DIVERGING_ITERATES = -7
    """
    The solver encountered diverging primal iterates xₖ and/or sₖ and gave
    up.
    """

    MAX_ITERATIONS_EXCEEDED = -8
    """
    The solver returned its solution so far after exceeding the maximum
    number of iterations.
    """

    TIMEOUT = -9
    """
    The solver returned its solution so far after exceeding the maximum
    elapsed wall clock time.
    """
|
|
114
|
+
|
|
115
|
+
class IterationInfo:
    """
    Solver iteration information exposed to an iteration callback.

    Template parameter ``Scalar``:
        Scalar type.
    """

    @property
    def iteration(self) -> int:
        """The solver iteration."""

    @property
    def x(self) -> Annotated[NDArray[numpy.float64], dict(shape=(None,), order='C')]:
        """The decision variables."""

    @property
    def g(self) -> scipy.sparse.csc_matrix[float]:
        """The gradient of the cost function."""

    @property
    def H(self) -> scipy.sparse.csc_matrix[float]:
        """The Hessian of the Lagrangian."""

    @property
    def A_e(self) -> scipy.sparse.csc_matrix[float]:
        """The equality constraint Jacobian."""

    @property
    def A_i(self) -> scipy.sparse.csc_matrix[float]:
        """The inequality constraint Jacobian."""
|
|
146
|
+
|
|
147
|
+
class Problem:
    """
    This class allows the user to pose a constrained nonlinear
    optimization problem in natural mathematical notation and solve it.

    This class supports problems of the form: @verbatim minₓ f(x) subject
    to cₑ(x) = 0 cᵢ(x) ≥ 0 @endverbatim

    where f(x) is the scalar cost function, x is the vector of decision
    variables (variables the solver can tweak to minimize the cost
    function), cᵢ(x) are the inequality constraints, and cₑ(x) are the
    equality constraints. Constraints are equations or inequalities of the
    decision variables that constrain what values the solver is allowed to
    use when searching for an optimal solution.

    The nice thing about this class is users don't have to put their
    system in the form shown above manually; they can write it in natural
    mathematical form and it'll be converted for them.

    Template parameter ``Scalar``:
        Scalar type.
    """

    def __init__(self) -> None:
        """Construct the optimization problem."""

    @overload
    def decision_variable(self) -> _jormungandr.autodiff.Variable:
        """
        Create a decision variable in the optimization problem.

        Returns:
            A decision variable in the optimization problem.
        """

    @overload
    def decision_variable(self, rows: int, cols: int = 1) -> _jormungandr.autodiff.VariableMatrix:
        """
        Create a matrix of decision variables in the optimization problem.

        Parameter ``rows``:
            Number of matrix rows.

        Parameter ``cols``:
            Number of matrix columns.

        Returns:
            A matrix of decision variables in the optimization problem.
        """

    def symmetric_decision_variable(self, rows: int) -> _jormungandr.autodiff.VariableMatrix:
        """
        Create a symmetric matrix of decision variables in the optimization
        problem.

        Variable instances are reused across the diagonal, which helps reduce
        problem dimensionality.

        Parameter ``rows``:
            Number of matrix rows.

        Returns:
            A symmetric matrix of decision variables in the optimization
            problem.
        """

    @overload
    def minimize(self, cost: _jormungandr.autodiff.Variable) -> None:
        """
        Tells the solver to minimize the output of the given cost function.

        Note that this is optional. If only constraints are specified, the
        solver will find the closest solution to the initial conditions that's
        in the feasible set.

        Parameter ``cost``:
            The cost function to minimize.
        """

    @overload
    def minimize(self, cost: _jormungandr.autodiff.VariableMatrix) -> None: ...

    @overload
    def minimize(self, cost: float) -> None: ...

    @overload
    def maximize(self, objective: _jormungandr.autodiff.Variable) -> None:
        """
        Tells the solver to maximize the output of the given objective
        function.

        Note that this is optional. If only constraints are specified, the
        solver will find the closest solution to the initial conditions that's
        in the feasible set.

        Parameter ``objective``:
            The objective function to maximize.
        """

    @overload
    def maximize(self, objective: _jormungandr.autodiff.VariableMatrix) -> None: ...

    @overload
    def maximize(self, objective: float) -> None: ...

    @overload
    def subject_to(self, constraint: EqualityConstraints) -> None:
        """
        Tells the solver to solve the problem while satisfying the given
        equality constraint.

        Parameter ``constraint``:
            The constraint to satisfy.
        """

    @overload
    def subject_to(self, constraint: InequalityConstraints) -> None:
        """
        Tells the solver to solve the problem while satisfying the given
        inequality constraint.

        Parameter ``constraint``:
            The constraint to satisfy.
        """

    def cost_function_type(self) -> _jormungandr.autodiff.ExpressionType:
        """
        Returns the cost function's type.

        Returns:
            The cost function's type.
        """

    def equality_constraint_type(self) -> _jormungandr.autodiff.ExpressionType:
        """
        Returns the type of the highest order equality constraint.

        Returns:
            The type of the highest order equality constraint.
        """

    def inequality_constraint_type(self) -> _jormungandr.autodiff.ExpressionType:
        """
        Returns the type of the highest order inequality constraint.

        Returns:
            The type of the highest order inequality constraint.
        """

    def solve(self, **kwargs) -> ExitStatus:
        """
        Solve the optimization problem. The solution will be stored in the
        original variables used to construct the problem.

        Parameter ``tolerance``:
            The solver will stop once the error is below this tolerance.
            (default: 1e-8)

        Parameter ``max_iterations``:
            The maximum number of solver iterations before returning a solution.
            (default: 5000)

        Parameter ``timeout``:
            The maximum elapsed wall clock time before returning a solution.
            (default: infinity)

        Parameter ``feasible_ipm``:
            Enables the feasible interior-point method. When the inequality
            constraints are all feasible, step sizes are reduced when necessary to
            prevent them becoming infeasible again. This is useful when parts of the
            problem are ill-conditioned in infeasible regions (e.g., square root of a
            negative value). This can slow or prevent progress toward a solution
            though, so only enable it if necessary.
            (default: False)

        Parameter ``diagnostics``:
            Enables diagnostic prints.

            <table>
              <tr>
                <th>Heading</th>
                <th>Description</th>
              </tr>
              <tr>
                <td>iter</td>
                <td>Iteration number</td>
              </tr>
              <tr>
                <td>type</td>
                <td>Iteration type (normal, accepted second-order correction, rejected second-order correction)</td>
              </tr>
              <tr>
                <td>time (ms)</td>
                <td>Duration of iteration in milliseconds</td>
              </tr>
              <tr>
                <td>error</td>
                <td>Error estimate</td>
              </tr>
              <tr>
                <td>cost</td>
                <td>Cost function value at current iterate</td>
              </tr>
              <tr>
                <td>infeas.</td>
                <td>Constraint infeasibility at current iterate</td>
              </tr>
              <tr>
                <td>complement.</td>
                <td>Complementary slackness at current iterate (sᵀz)</td>
              </tr>
              <tr>
                <td>μ</td>
                <td>Barrier parameter</td>
              </tr>
              <tr>
                <td>reg</td>
                <td>Iteration matrix regularization</td>
              </tr>
              <tr>
                <td>primal α</td>
                <td>Primal step size</td>
              </tr>
              <tr>
                <td>dual α</td>
                <td>Dual step size</td>
              </tr>
              <tr>
                <td>↩</td>
                <td>Number of line search backtracks</td>
              </tr>
            </table>
            (default: False)

        Parameter ``spy``:
            Enables writing sparsity patterns of H, Aₑ, and Aᵢ to files named H.spy,
            A_e.spy, and A_i.spy respectively during solve. Use tools/spy.py to plot them.
            (default: False)
        """

    def add_callback(self, callback: Callable[[IterationInfo], bool]) -> None:
        """
        Adds a callback to be called at the beginning of each solver
        iteration.

        The callback for this overload should return bool.

        Parameter ``callback``:
            The callback. Returning true from the callback causes the solver
            to exit early with the solution it has so far.
        """

    def clear_callbacks(self) -> None:
        """Clears the registered callbacks."""
|
|
401
|
+
|
|
402
|
+
class DynamicsType(enum.Enum):
    """Enum describing a type of system dynamics constraints."""

    EXPLICIT_ODE = 0
    """The dynamics are a function in the form dx/dt = f(t, x, u)."""

    DISCRETE = 1
    """The dynamics are a function in the form xₖ₊₁ = f(t, xₖ, uₖ)."""
|
|
410
|
+
|
|
411
|
+
class TimestepMethod(enum.Enum):
    """Enum describing the type of system timestep."""

    FIXED = 0
    """The timestep is a fixed constant."""

    VARIABLE = 1
    """The timesteps are allowed to vary as independent decision variables."""

    VARIABLE_SINGLE = 2
    """
    The timesteps are equal length but allowed to vary as a single
    decision variable.
    """
|
|
425
|
+
|
|
426
|
+
class TranscriptionMethod(enum.Enum):
    """Enum describing an OCP transcription method."""

    DIRECT_TRANSCRIPTION = 0
    """
    Each state is a decision variable constrained to the integrated
    dynamics of the previous state.
    """

    DIRECT_COLLOCATION = 1
    """
    The trajectory is modeled as a series of cubic polynomials where the
    centerpoint slope is constrained.
    """

    SINGLE_SHOOTING = 2
    """
    States depend explicitly as a function of all previous states and all
    previous inputs.
    """
|
|
446
|
+
|
|
447
|
+
class OCP(Problem):
    """
    This class allows the user to pose and solve a constrained optimal
    control problem (OCP) in a variety of ways.

    The system is transcripted by one of three methods (direct
    transcription, direct collocation, or single-shooting) and additional
    constraints can be added.

    In direct transcription, each state is a decision variable constrained
    to the integrated dynamics of the previous state. In direct
    collocation, the trajectory is modeled as a series of cubic
    polynomials where the centerpoint slope is constrained. In single-
    shooting, states depend explicitly as a function of all previous
    states and all previous inputs.

    Explicit ODEs are integrated using RK4.

    For explicit ODEs, the function must be in the form dx/dt = f(t, x,
    u). For discrete state transition functions, the function must be in
    the form xₖ₊₁ = f(t, xₖ, uₖ).

    Direct collocation requires an explicit ODE. Direct transcription and
    single-shooting can use either an ODE or state transition function.

    https://underactuated.mit.edu/trajopt.html goes into more detail on
    each transcription method.

    Template parameter ``Scalar``:
        Scalar type.
    """

    def __init__(self, num_states: int, num_inputs: int, dt: datetime.timedelta | float, num_steps: int, dynamics: Callable[[_jormungandr.autodiff.VariableMatrix, _jormungandr.autodiff.VariableMatrix], _jormungandr.autodiff.VariableMatrix], dynamics_type: DynamicsType = DynamicsType.EXPLICIT_ODE, timestep_method: TimestepMethod = TimestepMethod.FIXED, transcription_method: TranscriptionMethod = TranscriptionMethod.DIRECT_TRANSCRIPTION) -> None:
        """
        Build an optimization problem using a system evolution function
        (explicit ODE or discrete state transition function).

        Parameter ``num_states``:
            The number of system states.

        Parameter ``num_inputs``:
            The number of system inputs.

        Parameter ``dt``:
            The timestep for fixed-step integration.

        Parameter ``num_steps``:
            The number of control points.

        Parameter ``dynamics``:
            Function representing an explicit or implicit ODE, or a discrete
            state transition function. - Explicit: dx/dt = f(x, u, *) -
            Implicit: f([x dx/dt]', u, *) = 0 - State transition: xₖ₊₁ = f(xₖ,
            uₖ)

        Parameter ``dynamics_type``:
            The type of system evolution function.

        Parameter ``timestep_method``:
            The timestep method.

        Parameter ``transcription_method``:
            The transcription method.
        """

    @overload
    def constrain_initial_state(self, initial_state: float) -> None:
        """
        Utility function to constrain the initial state.

        Parameter ``initial_state``:
            the initial state to constrain to.
        """

    @overload
    def constrain_initial_state(self, initial_state: int) -> None: ...

    @overload
    def constrain_initial_state(self, initial_state: _jormungandr.autodiff.Variable) -> None: ...

    @overload
    def constrain_initial_state(self, initial_state: Annotated[NDArray[numpy.float64], dict(shape=(None, None))]) -> None: ...

    @overload
    def constrain_initial_state(self, initial_state: _jormungandr.autodiff.VariableMatrix) -> None: ...

    @overload
    def constrain_final_state(self, final_state: float) -> None:
        """
        Utility function to constrain the final state.

        Parameter ``final_state``:
            the final state to constrain to.
        """

    @overload
    def constrain_final_state(self, final_state: int) -> None: ...

    @overload
    def constrain_final_state(self, final_state: _jormungandr.autodiff.Variable) -> None: ...

    @overload
    def constrain_final_state(self, final_state: Annotated[NDArray[numpy.float64], dict(shape=(None, None))]) -> None: ...

    @overload
    def constrain_final_state(self, final_state: _jormungandr.autodiff.VariableMatrix) -> None: ...

    def for_each_step(self, callback: Callable[[_jormungandr.autodiff.VariableMatrix, _jormungandr.autodiff.VariableMatrix], None]) -> None:
        """
        Set the constraint evaluation function. This function is called
        `num_steps+1` times, with the corresponding state and input
        VariableMatrices.

        Parameter ``callback``:
            The callback f(x, u) where x is the state and u is the input
            vector.
        """

    @overload
    def set_lower_input_bound(self, lower_bound: float) -> None:
        """
        Convenience function to set a lower bound on the input.

        Parameter ``lower_bound``:
            The lower bound that inputs must always be above. Must be shaped
            (num_inputs)x1.
        """

    @overload
    def set_lower_input_bound(self, lower_bound: int) -> None: ...

    @overload
    def set_lower_input_bound(self, lower_bound: _jormungandr.autodiff.Variable) -> None: ...

    @overload
    def set_lower_input_bound(self, lower_bound: Annotated[NDArray[numpy.float64], dict(shape=(None, None))]) -> None: ...

    @overload
    def set_lower_input_bound(self, lower_bound: _jormungandr.autodiff.VariableMatrix) -> None: ...

    @overload
    def set_upper_input_bound(self, upper_bound: float) -> None:
        """
        Convenience function to set an upper bound on the input.

        Parameter ``upper_bound``:
            The upper bound that inputs must always be below. Must be shaped
            (num_inputs)x1.
        """

    @overload
    def set_upper_input_bound(self, upper_bound: int) -> None: ...

    @overload
    def set_upper_input_bound(self, upper_bound: _jormungandr.autodiff.Variable) -> None: ...

    @overload
    def set_upper_input_bound(self, upper_bound: Annotated[NDArray[numpy.float64], dict(shape=(None, None))]) -> None: ...

    @overload
    def set_upper_input_bound(self, upper_bound: _jormungandr.autodiff.VariableMatrix) -> None: ...

    def set_min_timestep(self, min_timestep: datetime.timedelta | float) -> None:
        """
        Convenience function to set a lower bound on the timestep.

        Parameter ``min_timestep``:
            The minimum timestep.
        """

    def set_max_timestep(self, max_timestep: datetime.timedelta | float) -> None:
        """
        Convenience function to set an upper bound on the timestep.

        Parameter ``max_timestep``:
            The maximum timestep.
        """

    def X(self) -> _jormungandr.autodiff.VariableMatrix:
        """
        Get the state variables. After the problem is solved, this will
        contain the optimized trajectory.

        Shaped (num_states)x(num_steps+1).

        Returns:
            The state variable matrix.
        """

    def U(self) -> _jormungandr.autodiff.VariableMatrix:
        """
        Get the input variables. After the problem is solved, this will
        contain the inputs corresponding to the optimized trajectory.

        Shaped (num_inputs)x(num_steps+1), although the last input step is
        unused in the trajectory.

        Returns:
            The input variable matrix.
        """

    def dt(self) -> _jormungandr.autodiff.VariableMatrix:
        """
        Get the timestep variables. After the problem is solved, this will
        contain the timesteps corresponding to the optimized trajectory.

        Shaped 1x(num_steps+1), although the last timestep is unused in the
        trajectory.

        Returns:
            The timestep variable matrix.
        """

    def initial_state(self) -> _jormungandr.autodiff.VariableMatrix:
        """
        Convenience function to get the initial state in the trajectory.

        Returns:
            The initial state of the trajectory.
        """

    def final_state(self) -> _jormungandr.autodiff.VariableMatrix:
        """
        Convenience function to get the final state in the trajectory.

        Returns:
            The final state of the trajectory.
        """