rtc-tools 2.5.2rc3__py3-none-any.whl → 2.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (47)
  1. {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/METADATA +7 -7
  2. rtc_tools-2.6.0.dist-info/RECORD +50 -0
  3. {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/WHEEL +1 -1
  4. rtctools/__init__.py +2 -1
  5. rtctools/_internal/alias_tools.py +12 -10
  6. rtctools/_internal/caching.py +5 -3
  7. rtctools/_internal/casadi_helpers.py +11 -32
  8. rtctools/_internal/debug_check_helpers.py +1 -1
  9. rtctools/_version.py +3 -3
  10. rtctools/data/__init__.py +2 -2
  11. rtctools/data/csv.py +54 -33
  12. rtctools/data/interpolation/bspline.py +3 -3
  13. rtctools/data/interpolation/bspline1d.py +42 -29
  14. rtctools/data/interpolation/bspline2d.py +10 -4
  15. rtctools/data/netcdf.py +137 -93
  16. rtctools/data/pi.py +304 -210
  17. rtctools/data/rtc.py +64 -53
  18. rtctools/data/storage.py +91 -51
  19. rtctools/optimization/collocated_integrated_optimization_problem.py +1244 -696
  20. rtctools/optimization/control_tree_mixin.py +68 -66
  21. rtctools/optimization/csv_lookup_table_mixin.py +107 -74
  22. rtctools/optimization/csv_mixin.py +83 -52
  23. rtctools/optimization/goal_programming_mixin.py +239 -148
  24. rtctools/optimization/goal_programming_mixin_base.py +204 -111
  25. rtctools/optimization/homotopy_mixin.py +36 -27
  26. rtctools/optimization/initial_state_estimation_mixin.py +8 -8
  27. rtctools/optimization/io_mixin.py +48 -43
  28. rtctools/optimization/linearization_mixin.py +3 -1
  29. rtctools/optimization/linearized_order_goal_programming_mixin.py +57 -28
  30. rtctools/optimization/min_abs_goal_programming_mixin.py +72 -29
  31. rtctools/optimization/modelica_mixin.py +135 -81
  32. rtctools/optimization/netcdf_mixin.py +32 -18
  33. rtctools/optimization/optimization_problem.py +181 -127
  34. rtctools/optimization/pi_mixin.py +68 -36
  35. rtctools/optimization/planning_mixin.py +19 -0
  36. rtctools/optimization/single_pass_goal_programming_mixin.py +159 -112
  37. rtctools/optimization/timeseries.py +4 -6
  38. rtctools/rtctoolsapp.py +18 -18
  39. rtctools/simulation/csv_mixin.py +37 -30
  40. rtctools/simulation/io_mixin.py +9 -5
  41. rtctools/simulation/pi_mixin.py +62 -32
  42. rtctools/simulation/simulation_problem.py +471 -180
  43. rtctools/util.py +84 -56
  44. rtc_tools-2.5.2rc3.dist-info/RECORD +0 -49
  45. {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/COPYING.LESSER +0 -0
  46. {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/entry_points.txt +0 -0
  47. {rtc_tools-2.5.2rc3.dist-info → rtc_tools-2.6.0.dist-info}/top_level.txt +0 -0
rtctools/optimization/optimization_problem.py
@@ -3,7 +3,6 @@ from abc import ABCMeta, abstractmethod, abstractproperty
 from typing import Any, Dict, Iterator, List, Tuple, Union
 
 import casadi as ca
-
 import numpy as np
 
 from rtctools._internal.alias_tools import AliasDict
@@ -53,8 +52,12 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
 
         self.__mixed_integer = False
 
-    def optimize(self, preprocessing: bool = True, postprocessing: bool = True,
-                 log_solver_failure_as_error: bool = True) -> bool:
+    def optimize(
+        self,
+        preprocessing: bool = True,
+        postprocessing: bool = True,
+        log_solver_failure_as_error: bool = True,
+    ) -> bool:
         """
         Perform one initialize-transcribe-solve-finalize cycle.
 
@@ -65,8 +68,10 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         """
 
         # Deprecations / removals
-        if hasattr(self, 'initial_state'):
-            raise RuntimeError("Support for `initial_state()` has been removed. Please use `history()` instead.")
+        if hasattr(self, "initial_state"):
+            raise RuntimeError(
+                "Support for `initial_state()` has been removed. Please use `history()` instead."
+            )
 
         logger.info("Entering optimize()")
 
@@ -78,8 +83,7 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
             # Check if control inputs are bounded
             self.__check_bounds_control_input()
         else:
-            logger.debug(
-                'Skipping Preprocessing in OptimizationProblem.optimize()')
+            logger.debug("Skipping Preprocessing in OptimizationProblem.optimize()")
 
         # Transcribe problem
         discrete, lbx, ubx, lbg, ubg, x0, nlp = self.transcribe()
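
The `optimize()` flow shown in these hunks (preprocess, transcribe, solve, postprocess) is normally driven by `rtctools.util.run_optimization_problem`, which constructs a user problem class and calls `optimize()` on it. A minimal sketch, assuming the usual CSV/Modelica mixin stack (the `Example` class and its model/input files are illustrative):

    from rtctools.optimization.collocated_integrated_optimization_problem import (
        CollocatedIntegratedOptimizationProblem,
    )
    from rtctools.optimization.csv_mixin import CSVMixin
    from rtctools.optimization.modelica_mixin import ModelicaMixin
    from rtctools.util import run_optimization_problem


    # Hypothetical user model; expects a Modelica model and CSV input in the
    # standard rtc-tools directory layout next to this script.
    class Example(CSVMixin, ModelicaMixin, CollocatedIntegratedOptimizationProblem):
        pass


    if __name__ == "__main__":
        # Constructs Example(...) and calls its optimize() method.
        run_optimization_problem(Example)
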
@@ -93,19 +97,19 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
 
         logger.debug("Creating solver")
 
-        if options.pop('expand', False):
+        if options.pop("expand", False):
             # NOTE: CasADi only supports the "expand" option for nlpsol. To
             # also be able to expand with e.g. qpsol, we do the expansion
             # ourselves here.
             logger.debug("Expanding objective and constraints to SX")
 
-            expand_f_g = ca.Function('f_g', [nlp['x']], [nlp['f'], nlp['g']]).expand()
-            X_sx = ca.SX.sym('X', *nlp['x'].shape)
+            expand_f_g = ca.Function("f_g", [nlp["x"]], [nlp["f"], nlp["g"]]).expand()
+            X_sx = ca.SX.sym("X", *nlp["x"].shape)
             f_sx, g_sx = expand_f_g(X_sx)
 
-            nlp['f'] = f_sx
-            nlp['g'] = g_sx
-            nlp['x'] = X_sx
+            nlp["f"] = f_sx
+            nlp["g"] = g_sx
+            nlp["x"] = X_sx
 
         # Debug check for non-linearity in constraints
         self.__debug_check_linearity_constraints(nlp)
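
The "expand" branch above rebuilds the MX-graph objective and constraints as SX expressions before the solver is created. A standalone CasADi sketch of the same conversion (toy expressions, not rtc-tools code):

    import casadi as ca

    x = ca.MX.sym("X", 3)           # MX decision vector, as in nlp["x"]
    f = ca.sumsqr(x)                # toy objective
    g = x[0] + 2 * x[1] - x[2]      # toy constraint

    # Wrap f and g in a Function and expand it to scalar-based SX.
    expand_f_g = ca.Function("f_g", [x], [f, g]).expand()
    X_sx = ca.SX.sym("X", *x.shape)
    f_sx, g_sx = expand_f_g(X_sx)   # SX equivalents of f and g

    nlp = {"x": X_sx, "f": f_sx, "g": g_sx}   # ready for ca.nlpsol / ca.qpsol
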
@@ -114,34 +118,34 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         self.__debug_check_linear_independence(lbx, ubx, lbg, ubg, nlp)
 
         # Solver option
-        my_solver = options['solver']
-        del options['solver']
+        my_solver = options["solver"]
+        del options["solver"]
 
         # Already consumed
-        del options['optimized_num_dir']
+        del options["optimized_num_dir"]
 
         # Iteration callback
-        iteration_callback = options.pop('iteration_callback', None)
+        iteration_callback = options.pop("iteration_callback", None)
 
         # CasADi solver to use
-        casadi_solver = options.pop('casadi_solver')
+        casadi_solver = options.pop("casadi_solver")
         if isinstance(casadi_solver, str):
             casadi_solver = getattr(ca, casadi_solver)
 
         nlpsol_options = {**options}
 
         if self.__mixed_integer:
-            nlpsol_options['discrete'] = discrete
+            nlpsol_options["discrete"] = discrete
         if iteration_callback:
-            nlpsol_options['iteration_callback'] = iteration_callback
+            nlpsol_options["iteration_callback"] = iteration_callback
 
         # Remove ipopt and bonmin defaults if they are not used
-        if my_solver != 'ipopt':
-            nlpsol_options.pop('ipopt', None)
-        if my_solver != 'bonmin':
-            nlpsol_options.pop('bonmin', None)
+        if my_solver != "ipopt":
+            nlpsol_options.pop("ipopt", None)
+        if my_solver != "bonmin":
+            nlpsol_options.pop("bonmin", None)
 
-        solver = casadi_solver('nlp', my_solver, nlp, nlpsol_options)
+        solver = casadi_solver("nlp", my_solver, nlp, nlpsol_options)
 
         # Solve NLP
         logger.info("Calling solver")
@@ -149,9 +153,16 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         results = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=ca.veccat(*lbg), ubg=ca.veccat(*ubg))
 
         # Extract relevant stats
-        self.__objective_value = float(results['f'])
-        self.__solver_output = np.array(results['x']).ravel()
-        self.__transcribed_problem = {"lbx": lbx, "ubx": ubx, "lbg": lbg, "ubg": ubg, "x0": x0, "nlp": nlp}
+        self.__objective_value = float(results["f"])
+        self.__solver_output = np.array(results["x"]).ravel()
+        self.__transcribed_problem = {
+            "lbx": lbx,
+            "ubx": ubx,
+            "lbg": lbg,
+            "ubg": ubg,
+            "x0": x0,
+            "nlp": nlp,
+        }
         self.__lam_g = results.get("lam_g")
         self.__lam_x = results.get("lam_x")
 
@@ -159,18 +170,22 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
 
         success, log_level = self.solver_success(self.__solver_stats, log_solver_failure_as_error)
 
-        return_status = self.__solver_stats['return_status']
-        if 'secondary_return_status' in self.__solver_stats:
-            return_status = "{}: {}".format(return_status, self.__solver_stats['secondary_return_status'])
+        return_status = self.__solver_stats["return_status"]
+        if "secondary_return_status" in self.__solver_stats:
+            return_status = "{}: {}".format(
+                return_status, self.__solver_stats["secondary_return_status"]
+            )
         wall_clock_time = "elapsed time not read"
-        if 't_wall_total' in self.__solver_stats:
-            wall_clock_time = "{} seconds".format(self.__solver_stats['t_wall_total'])
-        elif 't_wall_solver' in self.__solver_stats:
-            wall_clock_time = "{} seconds".format(self.__solver_stats['t_wall_solver'])
+        if "t_wall_total" in self.__solver_stats:
+            wall_clock_time = "{} seconds".format(self.__solver_stats["t_wall_total"])
+        elif "t_wall_solver" in self.__solver_stats:
+            wall_clock_time = "{} seconds".format(self.__solver_stats["t_wall_solver"])
 
         if success:
-            logger.log(log_level, "Solver succeeded with status {} ({}).".format(
-                return_status, wall_clock_time))
+            logger.log(
+                log_level,
+                "Solver succeeded with status {} ({}).".format(return_status, wall_clock_time),
+            )
         else:
             try:
                 ii = [y[0] for y in self.loop_over_error].index(self.priority)
@@ -182,18 +197,21 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
             except IndexError:
                 if loop_error_indicator:
                     log_level = logging.INFO
-                logger.log(log_level, "Solver succeeded with status {} ({}).".format(
-                    return_status, wall_clock_time))
+                logger.log(
+                    log_level,
+                    "Solver succeeded with status {} ({}).".format(return_status, wall_clock_time),
+                )
             except (AttributeError, ValueError):
-                logger.log(log_level, "Solver succeeded with status {} ({}).".format(
-                    return_status, wall_clock_time))
+                logger.log(
+                    log_level,
+                    "Solver succeeded with status {} ({}).".format(return_status, wall_clock_time),
+                )
 
         # Do any postprocessing
         if postprocessing:
             self.post()
         else:
-            logger.debug(
-                'Skipping Postprocessing in OptimizationProblem.optimize()')
+            logger.debug("Skipping Postprocessing in OptimizationProblem.optimize()")
 
         # Done
         logger.info("Done with optimize()")
@@ -201,18 +219,23 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         return success
 
     def __check_bounds_control_input(self) -> None:
-        # Checks if at the control inputs have bounds, log warning when a control input is not bounded.
+        # Checks if at the control inputs have bounds, log warning when a control input is not
+        # bounded.
        bounds = self.bounds()
 
-        for variable in self.dae_variables['control_inputs']:
+        for variable in self.dae_variables["control_inputs"]:
             variable = variable.name()
             if variable not in bounds:
                 logger.warning(
-                    "OptimizationProblem: control input {} has no bounds.".format(variable))
+                    "OptimizationProblem: control input {} has no bounds.".format(variable)
+                )
 
     @abstractmethod
-    def transcribe(self) -> Tuple[
-            np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, Dict[str, ca.MX]]:
+    def transcribe(
+        self,
+    ) -> Tuple[
+        np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, Dict[str, ca.MX]
+    ]:
         """
         Transcribe the continuous optimization problem to a discretized, solver-ready
         optimization problem.
@@ -223,34 +246,35 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         """
         Returns a dictionary of CasADi optimization problem solver options.
 
-        The default solver for continuous problems is `Ipopt <https://projects.coin-or.org/Ipopt/>`_.
-        The default solver for mixed integer problems is `Bonmin <http://projects.coin-or.org/Bonmin/>`_.
+        The default solver for continuous problems is `Ipopt
+        <https://projects.coin-or.org/Ipopt/>`_.
+
+        The default solver for mixed integer problems is `Bonmin
+        <http://projects.coin-or.org/Bonmin/>`_.
 
         :returns: A dictionary of solver options. See the CasADi and
             respective solver documentation for details.
         """
-        options = {'error_on_fail': False,
-                   'optimized_num_dir': 3,
-                   'casadi_solver': ca.nlpsol}
+        options = {"error_on_fail": False, "optimized_num_dir": 3, "casadi_solver": ca.nlpsol}
 
         if self.__mixed_integer:
-            options['solver'] = 'bonmin'
+            options["solver"] = "bonmin"
 
-            bonmin_options = options['bonmin'] = {}
-            bonmin_options['algorithm'] = 'B-BB'
-            bonmin_options['nlp_solver'] = 'Ipopt'
-            bonmin_options['nlp_log_level'] = 2
-            bonmin_options['linear_solver'] = 'mumps'
+            bonmin_options = options["bonmin"] = {}
+            bonmin_options["algorithm"] = "B-BB"
+            bonmin_options["nlp_solver"] = "Ipopt"
+            bonmin_options["nlp_log_level"] = 2
+            bonmin_options["linear_solver"] = "mumps"
         else:
-            options['solver'] = 'ipopt'
+            options["solver"] = "ipopt"
 
-            ipopt_options = options['ipopt'] = {}
-            ipopt_options['linear_solver'] = 'mumps'
+            ipopt_options = options["ipopt"] = {}
+            ipopt_options["linear_solver"] = "mumps"
         return options
 
-    def solver_success(self,
-                       solver_stats: Dict[str, Union[str, bool]],
-                       log_solver_failure_as_error: bool) -> Tuple[bool, int]:
+    def solver_success(
+        self, solver_stats: Dict[str, Union[str, bool]], log_solver_failure_as_error: bool
+    ) -> Tuple[bool, int]:
         """
         Translates the returned solver statistics into a boolean and log level
         to indicate whether the solve was succesful, and how to log it.
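
A common way to consume the `solver_options()` defaults shown above is to override the method in a user problem class and adjust a few entries; a minimal sketch (the specific Ipopt settings are illustrative, not rtc-tools defaults):

    from rtctools.optimization.collocated_integrated_optimization_problem import (
        CollocatedIntegratedOptimizationProblem,
    )


    class MyProblem(CollocatedIntegratedOptimizationProblem):
        def solver_options(self):
            # Start from the defaults above (Ipopt with MUMPS for continuous problems)
            # and tweak a couple of standard Ipopt settings.
            options = super().solver_options()
            options["ipopt"]["max_iter"] = 500
            options["ipopt"]["tol"] = 1e-6
            return options
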
@@ -276,13 +300,15 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         For example, this can happen when too many goals are specified, and
         lower priority goals cannot improve further on the current result.
 
-        :returns: A tuple indicating whether or not the solver has succeeded, and what level to log it with.
+        :returns: A tuple indicating whether or not the solver has succeeded, and what level to log
+            it with.
         """
-        success = solver_stats['success']
+        success = solver_stats["success"]
         log_level = logging.INFO if success else logging.ERROR
 
-        if (self.solver_options()['solver'].lower() in ['bonmin', 'ipopt']
-                and solver_stats['return_status'] in ['Not_Enough_Degrees_Of_Freedom']):
+        if self.solver_options()["solver"].lower() in ["bonmin", "ipopt"] and solver_stats[
+            "return_status"
+        ] in ["Not_Enough_Degrees_Of_Freedom"]:
             log_level = logging.WARNING
 
         if log_level == logging.ERROR and not log_solver_failure_as_error:
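
`solver_success()` can likewise be overridden to accept additional return statuses. A sketch that treats Ipopt's iteration limit as an acceptable result ("Maximum_Iterations_Exceeded" is a standard Ipopt status; the acceptance policy itself is illustrative):

    import logging

    from rtctools.optimization.collocated_integrated_optimization_problem import (
        CollocatedIntegratedOptimizationProblem,
    )


    class MyProblem(CollocatedIntegratedOptimizationProblem):
        def solver_success(self, solver_stats, log_solver_failure_as_error):
            # Accept the intermediate result and log it as a warning instead of an error.
            if solver_stats["return_status"] == "Maximum_Iterations_Exceeded":
                return True, logging.WARNING
            return super().solver_success(solver_stats, log_solver_failure_as_error)
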
@@ -395,8 +421,8 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
 
     def delayed_feedback(self) -> List[Tuple[str, str, float]]:
         """
-        Returns the delayed feedback mappings. These are given as a list of triples :math:`(x, y, \\tau)`,
-        to indicate that :math:`y = x(t - \\tau)`.
+        Returns the delayed feedback mappings. These are given as a list of triples
+        :math:`(x, y, \\tau)`, to indicate that :math:`y = x(t - \\tau)`.
 
         :returns: A list of triples.
 
@@ -515,7 +541,10 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
             elif isinstance(v1, np.ndarray) and isinstance(v2, Timeseries):
                 if v2.values.ndim != 2 or len(v1) != v2.values.shape[1]:
                     raise Exception(
-                        "Mismatching vector size when upcasting to Timeseries, {} vs. {}.".format(v1, v2))
+                        "Mismatching vector size when upcasting to Timeseries, {} vs. {}.".format(
+                            v1, v2
+                        )
+                    )
                 all_bounds[i] = Timeseries(v2.times, np.broadcast_to(v1, v2.values.shape))
             elif isinstance(v1, (int, float)) and isinstance(v2, np.ndarray):
                 all_bounds[i] = np.full_like(v2, v1)
@@ -582,7 +611,8 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
 
         :param ensemble_member: The ensemble member index.
 
-        :returns: A dictionary of variable names and historical time series (up to and including t0).
+        :returns:
+            A dictionary of variable names and historical time series (up to and including t0).
         """
         return AliasDict(self.alias_relation)
 
@@ -639,7 +669,8 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         """
         The objective function for the given ensemble member.
 
-        Call :func:`OptimizationProblem.state_at` to return a symbol representing a model variable at a given time.
+        Call :func:`OptimizationProblem.state_at` to return a symbol representing a model variable
+        at a given time.
 
         :param ensemble_member: The ensemble member index.
 
@@ -677,12 +708,14 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         """
         return ca.MX(0)
 
-    def constraints(self, ensemble_member: int) -> List[
-            Tuple[ca.MX, Union[float, np.ndarray], Union[float, np.ndarray]]]:
+    def constraints(
+        self, ensemble_member: int
+    ) -> List[Tuple[ca.MX, Union[float, np.ndarray], Union[float, np.ndarray]]]:
         """
         Returns a list of constraints for the given ensemble member.
 
-        Call :func:`OptimizationProblem.state_at` to return a symbol representing a model variable at a given time.
+        Call :func:`OptimizationProblem.state_at` to return a symbol representing a model variable
+        at a given time.
 
         :param ensemble_member: The ensemble member index.
 
@@ -705,8 +738,9 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         """
         return []
 
-    def path_constraints(self, ensemble_member: int) -> List[
-            Tuple[ca.MX, Union[float, np.ndarray], Union[float, np.ndarray]]]:
+    def path_constraints(
+        self, ensemble_member: int
+    ) -> List[Tuple[ca.MX, Union[float, np.ndarray], Union[float, np.ndarray]]]:
         """
         Returns a list of path constraints.
 
@@ -751,13 +785,14 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
     INTERPOLATION_PIECEWISE_CONSTANT_BACKWARD = 2
 
     def interpolate(
-            self,
-            t: Union[float, np.ndarray],
-            ts: np.ndarray,
-            fs: np.ndarray,
-            f_left: float = np.nan,
-            f_right: float = np.nan,
-            mode: int = INTERPOLATION_LINEAR) -> Union[float, np.ndarray]:
+        self,
+        t: Union[float, np.ndarray],
+        ts: np.ndarray,
+        fs: np.ndarray,
+        f_left: float = np.nan,
+        f_right: float = np.nan,
+        mode: int = INTERPOLATION_LINEAR,
+    ) -> Union[float, np.ndarray]:
         """
         Linear interpolation over time.
 
@@ -779,9 +814,11 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
                 # Early termination; nothing to interpolate
                 return fs.copy()
 
-            fs_int = [self.interpolate(t, ts, fs[:, i], f_left, f_right, mode) for i in range(fs.shape[1])]
+            fs_int = [
+                self.interpolate(t, ts, fs[:, i], f_left, f_right, mode) for i in range(fs.shape[1])
+            ]
             return np.stack(fs_int, axis=1)
-        elif hasattr(t, '__iter__'):
+        elif hasattr(t, "__iter__"):
             if len(t) == len(ts) and np.all(t == ts):
                 # Early termination; nothing to interpolate
                 return fs.copy()
@@ -807,26 +844,26 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         :param f_right: Function value right of rightmost time stamp.
         :param mode: Interpolation mode.
 
-        Note that it is assumed that `ts` is sorted. No such assumption is made for `t`
-        .
+        Note that it is assumed that `ts` is sorted. No such assumption is made for `t`.
+
         :returns: The interpolated value.
         """
 
         if f_left is None:
-            if (min(t) if hasattr(t, '__iter__') else t) < ts[0]:
+            if (min(t) if hasattr(t, "__iter__") else t) < ts[0]:
                 raise Exception("Interpolation: Point {} left of range".format(t))
 
         if f_right is None:
-            if (max(t) if hasattr(t, '__iter__') else t) > ts[-1]:
+            if (max(t) if hasattr(t, "__iter__") else t) > ts[-1]:
                 raise Exception("Interpolation: Point {} right of range".format(t))
 
         if mode == self.INTERPOLATION_LINEAR:
             # No need to handle f_left / f_right; NumPy already does this for us
             return np.interp(t, ts, fs, f_left, f_right)
         elif mode == self.INTERPOLATION_PIECEWISE_CONSTANT_FORWARD:
-            v = fs[np.maximum(np.searchsorted(ts, t, side='right') - 1, 0)]
+            v = fs[np.maximum(np.searchsorted(ts, t, side="right") - 1, 0)]
         elif mode == self.INTERPOLATION_PIECEWISE_CONSTANT_BACKWARD:
-            v = fs[np.minimum(np.searchsorted(ts, t, side='left'), len(ts) - 1)]
+            v = fs[np.minimum(np.searchsorted(ts, t, side="left"), len(ts) - 1)]
         else:
             raise NotImplementedError
 
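
The two piecewise-constant branches above reduce to plain `np.searchsorted` index arithmetic; a standalone numeric check (values chosen arbitrarily):

    import numpy as np

    ts = np.array([0.0, 1.0, 2.0, 3.0])
    fs = np.array([10.0, 20.0, 30.0, 40.0])
    t = 1.5

    np.interp(t, ts, fs)                                              # linear: 25.0
    fs[np.maximum(np.searchsorted(ts, t, side="right") - 1, 0)]       # forward hold: 20.0
    fs[np.minimum(np.searchsorted(ts, t, side="left"), len(ts) - 1)]  # backward hold: 30.0
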
@@ -850,8 +887,9 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         pass
 
     @abstractmethod
-    def discretize_controls(self, resolved_bounds: AliasDict) -> Tuple[
-            int, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+    def discretize_controls(
+        self, resolved_bounds: AliasDict
+    ) -> Tuple[int, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
         """
         Performs the discretization of the control inputs, filling lower and upper
         bound vectors for the resulting optimization variables, as well as an initial guess.
@@ -914,7 +952,9 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         return self.variable(variable)
 
     @abstractmethod
-    def control_at(self, variable: str, t: float, ensemble_member: int = 0, scaled: bool = False) -> ca.MX:
+    def control_at(
+        self, variable: str, t: float, ensemble_member: int = 0, scaled: bool = False
+    ) -> ca.MX:
         """
         Returns an :class:`MX` symbol representing the given control input at the given time.
 
@@ -944,8 +984,9 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         pass
 
     @abstractmethod
-    def discretize_states(self, resolved_bounds: AliasDict) -> Tuple[
-            int, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+    def discretize_states(
+        self, resolved_bounds: AliasDict
+    ) -> Tuple[int, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
         """
         Perform the discretization of the states.
 
@@ -1002,7 +1043,9 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         return self.variable(variable)
 
     @abstractmethod
-    def state_at(self, variable: str, t: float, ensemble_member: int = 0, scaled: bool = False) -> ca.MX:
+    def state_at(
+        self, variable: str, t: float, ensemble_member: int = 0, scaled: bool = False
+    ) -> ca.MX:
         """
         Returns an :class:`MX` symbol representing the given variable at the given time.
 
@@ -1032,7 +1075,9 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         pass
 
     @abstractmethod
-    def states_in(self, variable: str, t0: float = None, tf: float = None, ensemble_member: int = 0) -> Iterator[ca.MX]:
+    def states_in(
+        self, variable: str, t0: float = None, tf: float = None, ensemble_member: int = 0
+    ) -> Iterator[ca.MX]:
         """
         Iterates over symbols for states in the interval [t0, tf].
 
@@ -1046,7 +1091,9 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         pass
 
     @abstractmethod
-    def integral(self, variable: str, t0: float = None, tf: float = None, ensemble_member: int = 0) -> ca.MX:
+    def integral(
+        self, variable: str, t0: float = None, tf: float = None, ensemble_member: int = 0
+    ) -> ca.MX:
         """
         Returns an expression for the integral over the interval [t0, tf].
 
@@ -1104,12 +1151,13 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         raise NotImplementedError
 
     def set_timeseries(
-            self,
-            variable: str,
-            timeseries: Timeseries,
-            ensemble_member: int = 0,
-            output: bool = True,
-            check_consistency: bool = True) -> None:
+        self,
+        variable: str,
+        timeseries: Timeseries,
+        ensemble_member: int = 0,
+        output: bool = True,
+        check_consistency: bool = True,
+    ) -> None:
         """
         Sets a timeseries in the internal data store.
 
@@ -1149,19 +1197,19 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
 
     @debug_check(DebugLevel.HIGH)
     def __debug_check_linearity_constraints(self, nlp):
-        x = nlp['x']
-        f = nlp['f']
-        g = nlp['g']
+        x = nlp["x"]
+        f = nlp["f"]
+        g = nlp["g"]
 
-        expand_f_g = ca.Function('f_g', [x], [f, g]).expand()
-        X_sx = ca.SX.sym('X', *x.shape)
+        expand_f_g = ca.Function("f_g", [x], [f, g]).expand()
+        X_sx = ca.SX.sym("X", *x.shape)
         f_sx, g_sx = expand_f_g(X_sx)
 
-        jac = ca.Function('j', [X_sx], [ca.jacobian(g_sx, X_sx)]).expand()
+        jac = ca.Function("j", [X_sx], [ca.jacobian(g_sx, X_sx)]).expand()
         if jac(np.nan).is_regular():
             logger.info("The constraints are linear")
         else:
-            hes = ca.Function('j', [X_sx], [ca.jacobian(ca.jacobian(g_sx, X_sx), X_sx)]).expand()
+            hes = ca.Function("j", [X_sx], [ca.jacobian(ca.jacobian(g_sx, X_sx), X_sx)]).expand()
             if hes(np.nan).is_regular():
                 logger.info("The constraints are quadratic")
             else:
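
The linearity check above relies on NaN propagation: the Jacobian of a linear constraint is constant, so evaluating it at NaN still yields finite entries, while any entry that depends on the decision variables turns into NaN. A standalone CasADi illustration (toy constraints, not rtc-tools code):

    import casadi as ca
    import numpy as np

    x = ca.SX.sym("x", 2)
    g_lin = 2 * x[0] + 3 * x[1]     # linear: Jacobian is [2, 3]
    g_nonlin = x[0] * x[1]          # nonlinear: Jacobian is [x1, x0]

    jac_lin = ca.Function("j", [x], [ca.jacobian(g_lin, x)])
    jac_nonlin = ca.Function("j", [x], [ca.jacobian(g_nonlin, x)])

    print(jac_lin(np.nan).is_regular())     # True  -> linear
    print(jac_nonlin(np.nan).is_regular())  # False -> NaN propagated
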
@@ -1169,12 +1217,12 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
 
     @debug_check(DebugLevel.VERYHIGH)
     def __debug_check_linear_independence(self, lbx, ubx, lbg, ubg, nlp):
-        x = nlp['x']
-        f = nlp['f']
-        g = nlp['g']
+        x = nlp["x"]
+        f = nlp["f"]
+        g = nlp["g"]
 
-        expand_f_g = ca.Function('f_g', [x], [f, g]).expand()
-        x_sx = ca.SX.sym('X', *x.shape)
+        expand_f_g = ca.Function("f_g", [x], [f, g]).expand()
+        x_sx = ca.SX.sym("X", *x.shape)
         f_sx, g_sx = expand_f_g(x_sx)
 
         x, f, g = x_sx, f_sx, g_sx
@@ -1183,18 +1231,18 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
         ubg = np.array(ca.vertsplit(ca.veccat(*ubg))).ravel()
 
         # Find the linear constraints
-        g_sjac = ca.Function('Af', [x], [ca.jtimes(g, x, x.ones(*x.shape))])
+        g_sjac = ca.Function("Af", [x], [ca.jtimes(g, x, x.ones(*x.shape))])
 
         res = g_sjac(np.nan)
         res = np.array(res).ravel()
         g_is_linear = ~np.isnan(res)
 
         # Find the rows in the jacobian with only a single entry
-        g_jac_csr = ca.DM(ca.Function('tmp', [x], [g]).sparsity_jac(0, 0)).tocsc().tocsr()
-        g_single_variable = (np.diff(g_jac_csr.indptr) == 1)
+        g_jac_csr = ca.DM(ca.Function("tmp", [x], [g]).sparsity_jac(0, 0)).tocsc().tocsr()
+        g_single_variable = np.diff(g_jac_csr.indptr) == 1
 
         # Find the rows which are equality constraints
-        g_eq_constraint = (lbg == ubg)
+        g_eq_constraint = lbg == ubg
 
         # The intersection of all selections are constraints like we want
         g_constant_assignment = g_is_linear & g_single_variable & g_eq_constraint
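
The `np.diff(g_jac_csr.indptr) == 1` test above selects Jacobian rows with exactly one structural nonzero, i.e. constraints that involve a single variable. A small SciPy illustration of the same indptr arithmetic (toy matrix):

    import numpy as np
    from scipy.sparse import csr_matrix

    A = csr_matrix(np.array([
        [1.0, 0.0, 0.0],   # one nonzero  -> candidate "x0 = constant" row
        [1.0, 2.0, 0.0],   # two nonzeros -> involves multiple variables
        [0.0, 0.0, 3.0],   # one nonzero
    ]))

    print(np.diff(A.indptr) == 1)   # [ True False  True]
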
@@ -1210,7 +1258,11 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
 
         for vi, g_inds in var_index_assignment.items():
             if len(g_inds) > 1:
-                logger.info("Variable '{}' has duplicate constraints setting its value:".format(var_names[vi]))
+                logger.info(
+                    "Variable '{}' has duplicate constraints setting its value:".format(
+                        var_names[vi]
+                    )
+                )
                 for g_i in g_inds:
                     logger.info("row {}: {} = {}".format(g_i, named_g[g_i], lbg[g_i]))
 
@@ -1221,7 +1273,9 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
 
         for vi in x_inds:
             if vi in var_index_assignment:
-                logger.info("Variable '{}' has equal bounds (value = {}), but also the following equality constraints:"
-                            .format(var_names[vi], lbx[vi]))
+                logger.info(
+                    "Variable '{}' has equal bounds (value = {}), "
+                    "but also the following equality constraints:".format(var_names[vi], lbx[vi])
+                )
                 for g_i in var_index_assignment[vi]:
                     logger.info("row {}: {} = {}".format(g_i, named_g[g_i], lbg[g_i]))