rtc-tools 2.7.0.dev2__py3-none-any.whl → 2.8.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (35)
  1. rtc_tools-2.8.0a1.dist-info/METADATA +279 -0
  2. rtc_tools-2.8.0a1.dist-info/RECORD +52 -0
  3. {rtc_tools-2.7.0.dev2.dist-info → rtc_tools-2.8.0a1.dist-info}/WHEEL +1 -2
  4. rtc_tools-2.8.0a1.dist-info/licenses/AUTHORS +10 -0
  5. rtc_tools-2.8.0a1.dist-info/licenses/COPYING +674 -0
  6. rtctools/__init__.py +2 -4
  7. rtctools/_internal/casadi_helpers.py +13 -5
  8. rtctools/_internal/ensemble_bounds_decorator.py +71 -0
  9. rtctools/data/netcdf.py +16 -15
  10. rtctools/data/pi.py +5 -2
  11. rtctools/data/rtc.py +3 -3
  12. rtctools/optimization/collocated_integrated_optimization_problem.py +75 -29
  13. rtctools/optimization/control_tree_mixin.py +9 -6
  14. rtctools/optimization/csv_lookup_table_mixin.py +5 -3
  15. rtctools/optimization/csv_mixin.py +3 -0
  16. rtctools/optimization/goal_programming_mixin.py +11 -5
  17. rtctools/optimization/goal_programming_mixin_base.py +29 -4
  18. rtctools/optimization/io_mixin.py +11 -5
  19. rtctools/optimization/min_abs_goal_programming_mixin.py +9 -3
  20. rtctools/optimization/modelica_mixin.py +23 -10
  21. rtctools/optimization/optimization_problem.py +55 -13
  22. rtctools/optimization/pi_mixin.py +3 -3
  23. rtctools/optimization/single_pass_goal_programming_mixin.py +9 -3
  24. rtctools/rtctoolsapp.py +15 -13
  25. rtctools/simulation/io_mixin.py +1 -1
  26. rtctools/simulation/pi_mixin.py +3 -3
  27. rtctools/simulation/simulation_problem.py +25 -12
  28. rtctools/util.py +1 -0
  29. rtctools/version.py +51 -0
  30. rtc_tools-2.7.0.dev2.dist-info/METADATA +0 -51
  31. rtc_tools-2.7.0.dev2.dist-info/RECORD +0 -50
  32. rtc_tools-2.7.0.dev2.dist-info/top_level.txt +0 -1
  33. rtctools/_version.py +0 -21
  34. {rtc_tools-2.7.0.dev2.dist-info → rtc_tools-2.8.0a1.dist-info}/entry_points.txt +0 -0
  35. {rtc_tools-2.7.0.dev2.dist-info → rtc_tools-2.8.0a1.dist-info/licenses}/COPYING.LESSER +0 -0
rtctools/_internal/casadi_helpers.py CHANGED
@@ -5,12 +5,12 @@ import casadi as ca
  logger = logging.getLogger("rtctools")


- def is_affine(e, v):
+ def is_affine(expr, symbols):
      try:
-         Af = ca.Function("f", [v], [ca.jacobian(e, v)]).expand()
-     except RuntimeError as e:
-         if "'eval_sx' not defined for" in str(e):
-             Af = ca.Function("f", [v], [ca.jacobian(e, v)])
+         Af = ca.Function("f", [symbols], [ca.jacobian(expr, symbols)]).expand()
+     except RuntimeError as error:
+         if "'eval_sx' not defined for" in str(error):
+             Af = ca.Function("f", [symbols], [ca.jacobian(expr, symbols)])
          else:
              raise
      return Af.sparsity_jac(0, 0).nnz() == 0
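
The rename is cosmetic; the helper still answers "is this expression affine in these symbols?" by checking that the Jacobian of the expression's Jacobian has no non-zero entries. A minimal usage sketch (assuming a CasADi version supported by rtc-tools):

    import casadi as ca

    from rtctools._internal.casadi_helpers import is_affine

    x = ca.SX.sym("x")

    print(is_affine(2 * x + 1, x))  # True: the Jacobian (2) does not depend on x
    print(is_affine(x**2, x))       # False: the Jacobian (2*x) still depends on x
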
@@ -52,4 +52,12 @@ def interpolate(ts, xs, t, equidistant, mode=0):
          mode_str = "floor"
      else:
          mode_str = "ceil"
+
+     # CasADi fails if there is just a single point. Just "extrapolate" based on
+     # that point, just as CasADi would do for entries in 't' outside the range
+     # of 'ts'.
+     if len(ts) == 1:
+         assert xs.size1() == 1
+         return ca.vertcat(*[xs] * len(t))
+
      return ca.interp1d(ts, xs, t, mode_str, equidistant)
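
The new branch repeats the lone value for every requested time, mirroring how CasADi already extrapolates for entries of t outside the range of ts. A quick check of what the ca.vertcat(*[xs] * len(t)) expression produces:

    import casadi as ca

    xs = ca.DM([3.5])        # the single known value
    t = [0.0, 10.0, 20.0]    # requested times

    print(ca.vertcat(*[xs] * len(t)))  # [3.5, 3.5, 3.5]
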
rtctools/_internal/ensemble_bounds_decorator.py ADDED
@@ -0,0 +1,71 @@
+ import functools
+ import inspect
+
+
+ def ensemble_bounds_check(func):
+     """
+     Decorator for bounds() methods that enforces ensemble_member parameter handling
+     based on the ensemble_specific_bounds feature flag.
+
+     When ensemble_specific_bounds is True:
+     - ensemble_member parameter must be passed (and must be integer)
+
+     When ensemble_specific_bounds is False:
+     - ensemble_member parameter must NOT be passed
+
+     Raises appropriate TypeErrors with feature flag context.
+     """
+
+     @functools.wraps(func)
+     def wrapper(self, *args, **kwargs):
+         # Check that the decorator is used on a method that matches the
+         # expected signature
+         sig = inspect.signature(func)
+
+         has_ensemble_member_param = "ensemble_member" in sig.parameters
+
+         if not has_ensemble_member_param:
+             raise RuntimeError(
+                 f"bounds() method {func.__qualname__} must have 'ensemble_member' parameter. "
+                 f"Expected signature: def bounds(self, ensemble_member: Optional[int] = None)"
+             )
+
+         # Determine if ensemble_member was provided in the call
+         ensemble_member_provided = False
+         ensemble_member_value = None
+
+         if args:
+             # ensemble_member was passed as positional argument
+             ensemble_member_provided = True
+             ensemble_member_value = args[0]
+         elif "ensemble_member" in kwargs:
+             # ensemble_member was passed as keyword argument
+             ensemble_member_provided = True
+             ensemble_member_value = kwargs["ensemble_member"]
+
+         # Check feature flag and enforce rules
+         if self.ensemble_specific_bounds:
+             # Feature flag is ON - ensemble_member should be passed
+             if not ensemble_member_provided:
+                 raise TypeError(
+                     f"{func.__name__}() missing 1 required positional argument: 'ensemble_member'. "
+                     f"This is required when the 'ensemble_specific_bounds' feature flag is enabled."
+                 )
+             if ensemble_member_provided and not isinstance(ensemble_member_value, int):
+                 raise TypeError(
+                     f"ensemble_member must be an int, got {type(ensemble_member_value).__name__}. "
+                     f"This is required when the 'ensemble_specific_bounds' feature flag is enabled."
+                 )
+         else:
+             # Feature flag is OFF - ensemble_member should NOT be passed. Not even None.
+             if ensemble_member_provided:
+                 raise TypeError(
+                     f"{func.__name__}() takes 1 positional argument but 2 were given. "
+                     f"The 'ensemble_member' parameter should not be provided when the "
+                     f"'ensemble_specific_bounds' feature flag is disabled."
+                 )
+
+         # Call the original function
+         return func(self, *args, **kwargs)
+
+     return wrapper
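
A usage sketch of the new decorator on a minimal stand-in class; MyProblem and its bounds values are made up, and real problems are expected to get the ensemble_specific_bounds flag from the optimization problem base class rather than defining it themselves:

    from typing import Optional

    from rtctools._internal.ensemble_bounds_decorator import ensemble_bounds_check


    class MyProblem:
        ensemble_specific_bounds = True  # the feature flag the decorator inspects

        @ensemble_bounds_check
        def bounds(self, ensemble_member: Optional[int] = None):
            member = 0 if ensemble_member is None else ensemble_member
            # Member-specific upper bound, purely for illustration.
            return {"x": (0.0, 10.0 + member)}


    problem = MyProblem()
    print(problem.bounds(ensemble_member=1))  # {'x': (0.0, 11.0)}: flag on, int passed

    try:
        problem.bounds()  # flag on, but ensemble_member is missing
    except TypeError as exc:
        print(exc)

    problem.ensemble_specific_bounds = False
    print(problem.bounds())  # flag off: calling without ensemble_member works again

    try:
        problem.bounds(0)  # flag off, so passing ensemble_member is rejected
    except TypeError as exc:
        print(exc)
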
rtctools/data/netcdf.py CHANGED
@@ -401,20 +401,21 @@ class ExportDataset:
          """
          assert len(set(variable_names)) == len(variable_names)

-         assert (
-             self.__time_dim is not None
-         ), "First call write_times to ensure the time dimension has been created."
-         assert (
-             self.__station_dim is not None
-         ), "First call write_station_data to ensure the station dimension has been created"
+         assert self.__time_dim is not None, (
+             "First call write_times to ensure the time dimension has been created."
+         )
+         assert self.__station_dim is not None, (
+             "First call write_station_data to ensure the station dimension has been created"
+         )
          assert (
              self.__station_id_to_index_mapping is not None
          )  # should also be created in write_station_data

          if ensemble_size > 1:
-             assert (
-                 self.__ensemble_member_dim is not None
-             ), "First call write_ensemble_data to ensure the realization dimension has been created"
+             assert self.__ensemble_member_dim is not None, (
+                 "First call write_ensemble_data to ensure "
+                 "the realization dimension has been created"
+             )

          for variable_name in variable_names:
              self.__dataset.createVariable(
@@ -446,15 +447,15 @@ class ExportDataset:
          :param values: The values that are to be written to the file
          :param ensemble_size: the number of members in the ensemble
          """
-         assert (
-             self.__station_id_to_index_mapping is not None
-         ), "First call write_station_data and create_variables."
+         assert self.__station_id_to_index_mapping is not None, (
+             "First call write_station_data and create_variables."
+         )

          station_index = self.__station_id_to_index_mapping[station_id]
          if ensemble_size > 1:
-             self.__dataset.variables[variable_name][
-                 :, station_index, ensemble_member_index
-             ] = values
+             self.__dataset.variables[variable_name][:, station_index, ensemble_member_index] = (
+                 values
+             )
          else:
              self.__dataset.variables[variable_name][:, station_index] = values

rtctools/data/pi.py CHANGED
@@ -333,8 +333,11 @@ class ParameterConfig:

              parameters = group.findall("pi:parameter", ns)
              for parameter in parameters:
-                 yield location_id, model_id, parameter.attrib["id"], self.__parse_parameter(
-                     parameter
+                 yield (
+                     location_id,
+                     model_id,
+                     parameter.attrib["id"],
+                     self.__parse_parameter(parameter),
                  )


rtctools/data/rtc.py CHANGED
@@ -60,9 +60,9 @@ class DataConfig:
                      logger.error(message)
                      raise Exception(message)
                  else:
-                     self.__location_parameter_ids[
-                         internal_id
-                     ] = self.__pi_location_parameter_id(pi_timeseries, "fews")
+                     self.__location_parameter_ids[internal_id] = (
+                         self.__pi_location_parameter_id(pi_timeseries, "fews")
+                     )
                  self.__variable_map[external_id] = internal_id

          for k in ["import", "export"]:
rtctools/optimization/collocated_integrated_optimization_problem.py CHANGED
@@ -6,6 +6,7 @@ from typing import Dict, Union

  import casadi as ca
  import numpy as np
+ from numpy.typing import NDArray

  from rtctools._internal.alias_tools import AliasDict
  from rtctools._internal.casadi_helpers import (
@@ -17,7 +18,7 @@ from rtctools._internal.casadi_helpers import (
  )
  from rtctools._internal.debug_check_helpers import DebugLevel, debug_check

- from .optimization_problem import OptimizationProblem
+ from .optimization_problem import BT, OptimizationProblem
  from .timeseries import Timeseries

  logger = logging.getLogger("rtctools")
@@ -410,7 +411,10 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              if v.ndim == 1:
                  ensemble_data["extra_constant_inputs"][k] = v[:, None]

-         bounds = self.bounds()
+         if self.ensemble_specific_bounds:
+             bounds = [self.bounds(ensemble_member=i) for i in range(self.ensemble_size)]
+         else:
+             bounds = self.bounds()

          # Initialize control discretization
          (
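
With the flag enabled, the bounds object handed around inside transcribe() becomes a list with one mapping per ensemble member instead of a single shared mapping; a sketch with a made-up variable name:

    # ensemble_specific_bounds == False: one mapping shared by all members
    bounds = {"reservoir.volume": (0.0, 5.0e6)}

    # ensemble_specific_bounds == True: one mapping per ensemble member
    bounds = [
        {"reservoir.volume": (0.0, 5.0e6)},  # member 0
        {"reservoir.volume": (0.0, 4.2e6)},  # member 1
    ]
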
@@ -670,7 +674,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
                  for ensemble_member in range(self.ensemble_size)
              ]
              if (
-                 len(values) == 1 or (np.all(values) == values[0])
+                 len(values) == 1 or all(v == values[0] for v in values)
              ) and parameter.name() not in dynamic_parameter_names:
                  constant_parameters.append(parameter)
                  constant_parameter_values.append(values[0])
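
This is a genuine bug fix rather than a style change: np.all(values) collapses the list to a single boolean before the comparison, so identical parameter values other than 0 or 1 were never recognized as constant across ensemble members. A quick illustration:

    import numpy as np

    values = [2.0, 2.0]

    # Old check: np.all(values) is True (all entries truthy), and True == 2.0 is False.
    print(np.all(values) == values[0])          # False

    # New check: compare every entry against the first one.
    print(all(v == values[0] for v in values))  # True
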
@@ -898,11 +902,11 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              function_options,
          )

+         # Expand the residual function if possible.
          try:
              dae_residual_function_integrated = dae_residual_function_integrated.expand()
          except RuntimeError as e:
-             # We only expect to fail if the DAE was an external function
-             if "'eval_sx' not defined for External" in str(e):
+             if "'eval_sx' not defined for" in str(e):
                  pass
              else:
                  raise
@@ -933,13 +937,13 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              [dae_residual_collocated],
              function_options,
          )
+         # Expand the residual function if possible.
          try:
              self.__dae_residual_function_collocated = (
                  self.__dae_residual_function_collocated.expand()
              )
          except RuntimeError as e:
-             # We only expect to fail if the DAE was an external function
-             if "'eval_sx' not defined for External" in str(e):
+             if "'eval_sx' not defined for" in str(e):
                  pass
              else:
                  raise
@@ -1028,8 +1032,8 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
                  + len(self.dae_variables["constant_inputs"])
              ]
              constant_inputs_1 = accumulated_U[
-                 2 * len(collocated_variables)
-                 + len(self.dae_variables["constant_inputs"]) : 2 * len(collocated_variables)
+                 2 * len(collocated_variables) + len(self.dae_variables["constant_inputs"]) : 2
+                 * len(collocated_variables)
                  + 2 * len(self.dae_variables["constant_inputs"])
              ]

@@ -1803,9 +1807,9 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              # Cast delay from DM to np.array
              delay = delay.toarray().flatten()

-             assert np.all(
-                 np.isfinite(delay)
-             ), "Delay duration must be resolvable to real values at transcribe()"
+             assert np.all(np.isfinite(delay)), (
+                 "Delay duration must be resolvable to real values at transcribe()"
+             )

              out_times = np.concatenate([history_times, collocation_times])
              out_values = ca.veccat(
@@ -2043,9 +2047,12 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
      def controls(self):
          return self.__controls

-     def _collint_get_lbx_ubx(self, count, indices):
-         bounds = self.bounds()
-
+     def _collint_get_lbx_ubx(
+         self,
+         bounds: Union[dict[str, BT], list[dict[str, BT]]],
+         count: int,
+         indices: list[dict[str, Union[slice, int]]],
+     ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
          lbx = np.full(count, -np.inf, dtype=np.float64)
          ubx = np.full(count, np.inf, dtype=np.float64)

@@ -2056,6 +2063,11 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC

          # Bounds, defaulting to +/- inf, if not set
          for ensemble_member in range(self.ensemble_size):
+             if self.ensemble_specific_bounds:
+                 bounds_member = bounds[ensemble_member]
+             else:
+                 bounds_member = bounds
+
              for variable, inds in indices[ensemble_member].items():
                  variable_size = variable_sizes[variable]

@@ -2067,7 +2079,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
                  n_times = len(times)

                  try:
-                     bound = bounds[variable]
+                     bound = bounds_member[variable]
                  except KeyError:
                      pass
                  else:
@@ -2096,7 +2108,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
                          )
                      else:
                          lower_bound = bound[0]
-                     lbx[inds] = lower_bound / nominal
+                     lbx[inds] = np.maximum(lbx[inds], lower_bound / nominal)

                      if bound[1] is not None:
                          if isinstance(bound[1], Timeseries):
@@ -2116,7 +2128,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
                          )
                      else:
                          upper_bound = bound[1]
-                     ubx[inds] = upper_bound / nominal
+                     ubx[inds] = np.minimum(ubx[inds], upper_bound / nominal)

                  # Warn for NaNs
                  if np.any(np.isnan(lbx[inds])):
@@ -2124,6 +2136,19 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
                  if np.any(np.isnan(ubx[inds])):
                      logger.error("Upper bound on variable {} contains NaN".format(variable))

+                 # Check that the lower bounds are not higher than the upper
+                 # bounds. To avoid spam, we just log the first offending one per
+                 # variable, not _all_ time steps.
+                 if np.any(lbx[inds] > ubx[inds]):
+                     error_inds = np.where(lbx[inds] > ubx[inds])[0].tolist()
+                     logger.error(
+                         "Lower bound {} is higher than upper bound {} for variable {}".format(
+                             lbx[inds][error_inds[0]] * nominal,
+                             ubx[inds][error_inds[0]] * nominal,
+                             variable,
+                         )
+                     )
+
          return lbx, ubx

      def _collint_get_x0(self, count, indices):
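
The switch from plain assignment to np.maximum/np.minimum means that when the same entries of lbx/ubx receive a bound more than once, the tightest one wins, and the new check logs any case where the resulting lower bound ends up above the upper bound. A small illustration of the element-wise clipping:

    import numpy as np

    lbx = np.array([-np.inf, 0.0])   # lower bounds collected so far
    new_lower = np.array([-1.0, 0.5])

    # Old behaviour: assignment simply overwrote the existing 0.0.
    # New behaviour: keep the tightest (largest) lower bound per entry.
    lbx = np.maximum(lbx, new_lower)
    print(lbx)                       # [-1.   0.5]
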
@@ -2210,7 +2235,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
          count = max(count, control_indices_stop)

          discrete = self._collint_get_discrete(count, indices)
-         lbx, ubx = self._collint_get_lbx_ubx(count, indices)
+         lbx, ubx = self._collint_get_lbx_ubx(bounds, count, indices)
          x0 = self._collint_get_x0(count, indices)

          # Return number of control variables
@@ -2326,7 +2351,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              offset += 1

          discrete = self._collint_get_discrete(count, indices)
-         lbx, ubx = self._collint_get_lbx_ubx(count, indices)
+         lbx, ubx = self._collint_get_lbx_ubx(bounds, count, indices)
          x0 = self._collint_get_x0(count, indices)

          # Return number of state variables
@@ -2610,14 +2635,25 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
          else:
              tf = xf = ca.MX()
          t = ca.vertcat(t0, history_times[history_indices], times[indices], tf)
-         x = ca.vertcat(x0, history[history_indices], state[indices[0] : indices[-1] + 1], xf)
+         x = ca.vertcat(x0, history[history_indices], state[indices], xf)

          return x, t

-     def states_in(self, variable, t0=None, tf=None, ensemble_member=0):
-         x, _ = self.__states_times_in(variable, t0, tf, ensemble_member)
+     def states_in(
+         self,
+         variable: str,
+         t0: float = None,
+         tf: float = None,
+         ensemble_member: int = 0,
+         *,
+         return_times: bool = False,
+     ) -> Union[ca.MX, tuple[ca.DM, ca.MX]]:
+         x, t = self.__states_times_in(variable, t0, tf, ensemble_member)

-         return x
+         if return_times:
+             return x, t
+         else:
+             return x

      def integral(self, variable, t0=None, tf=None, ensemble_member=0):
          x, t = self.__states_times_in(variable, t0, tf, ensemble_member)
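
A sketch of how the extended states_in() can be used from a concrete problem class; the state name and the objective are made up, only the return_times keyword comes from the change above:

    import casadi as ca

    from rtctools.optimization.collocated_integrated_optimization_problem import (
        CollocatedIntegratedOptimizationProblem,
    )


    class ExampleProblem(CollocatedIntegratedOptimizationProblem):
        def objective(self, ensemble_member):
            # "storage.volume" is a made-up state name. Without return_times the
            # call behaves exactly as before and returns only the state vector.
            x, t = self.states_in(
                "storage.volume", ensemble_member=ensemble_member, return_times=True
            )
            dt = t[1:] - t[:-1]
            # Trapezoidal accumulation of the state over the horizon.
            return 0.5 * ca.sum1(dt * (x[1:] + x[:-1]))
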
@@ -2869,8 +2905,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC

          # Check coefficient matrix
          logger.info(
-             "Sanity check on objective and constraints Jacobian matrix"
-             "/constant coefficients values"
+             "Sanity check on objective and constraints Jacobian matrix/constant coefficients values"
          )

          in_var = nlp["x"]
@@ -2923,8 +2958,19 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              "{} & {}, {} & {}".format(max_constr_A, min_constr_A, max_constr_b, min_constr_b)
          )

-         maxs = [x for x in [max_constr_A, max_constr_b, max_obj_A, obj_b] if x is not None]
-         mins = [x for x in [min_constr_A, min_constr_b, min_obj_A, obj_b] if x is not None]
+         # Filter out exactly zero, as those entries do not show up in the
+         # matrix. Shut up SonarCloud warning about this exact-to-zero
+         # comparison.
+         maxs = [
+             x
+             for x in [max_constr_A, max_constr_b, max_obj_A, obj_b]
+             if x is not None and x != 0.0  # NOSONAR
+         ]
+         mins = [
+             x
+             for x in [min_constr_A, min_constr_b, min_obj_A, obj_b]
+             if x is not None and x != 0.0  # NOSONAR
+         ]
          if (maxs and max(maxs) > tol_up) or (mins and min(mins) < tol_down):
              logger.info("Jacobian matrix /constants coefficients values outside typical range!")

@@ -3113,7 +3159,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
          variable_to_all_indices = {k: set(v) for k, v in indices[0].items()}
          for ensemble_indices in indices[1:]:
              for k, v in ensemble_indices.items():
-                 variable_to_all_indices[k] |= v
+                 variable_to_all_indices[k] |= set(v)

          if len(inds_up) > 0:
              exceedences = []
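
For context, the in-place union operator on a set only accepts another set, so wrapping v in set() is what makes the accumulation work when the per-variable indices are plain sequences:

    s = {1, 2}
    # s |= [2, 3] raises TypeError: unsupported operand type(s) for |=: 'set' and 'list'
    s |= set([2, 3])
    print(s)  # {1, 2, 3}
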
rtctools/optimization/control_tree_mixin.py CHANGED
@@ -50,7 +50,7 @@ class ControlTreeMixin(OptimizationProblem):
          return options

      def discretize_control(self, variable, ensemble_member, times, offset):
-         control_indices = np.zeros(len(times), dtype=np.int16)
+         control_indices = np.zeros(len(times), dtype=np.int64)
          for branch, members in self.__branches.items():
              if ensemble_member not in members:
                  continue
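
The wider dtype matters because these arrays hold offsets into the full vector of decision variables, which can exceed the int16 range in larger problems:

    import numpy as np

    print(np.iinfo(np.int16).max)  # 32767: indices beyond this would wrap around
    print(np.iinfo(np.int64).max)  # 9223372036854775807
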
@@ -86,6 +86,11 @@ class ControlTreeMixin(OptimizationProblem):
          logger.debug("ControlTreeMixin: Branching times:")
          logger.debug(self.__branching_times)

+         # Avoid calling constant_inputs() many times
+         constant_inputs = [
+             self.constant_inputs(ensemble_member=i) for i in range(self.ensemble_size)
+         ]
+
          # Branches start at branching times, so that the tree looks like the following:
          #
          # *-----
@@ -122,18 +127,16 @@ class ControlTreeMixin(OptimizationProblem):
              for forecast_variable in options["forecast_variables"]:
                  # We assume the time stamps of the forecasts in all ensemble
                  # members to be identical
-                 timeseries = self.constant_inputs(ensemble_member=0)[forecast_variable]
+                 timeseries = constant_inputs[0][forecast_variable]
                  els = np.logical_and(
                      timeseries.times >= branching_time_0, timeseries.times < branching_time_1
                  )

                  # Compute distance between ensemble members
                  for i, member_i in enumerate(branches[current_branch]):
-                     timeseries_i = self.constant_inputs(ensemble_member=member_i)[forecast_variable]
+                     timeseries_i = constant_inputs[member_i][forecast_variable]
                      for j, member_j in enumerate(branches[current_branch]):
-                         timeseries_j = self.constant_inputs(ensemble_member=member_j)[
-                             forecast_variable
-                         ]
+                         timeseries_j = constant_inputs[member_j][forecast_variable]
                          distances[i, j] += np.linalg.norm(
                              timeseries_i.values[els] - timeseries_j.values[els]
                          )
rtctools/optimization/csv_lookup_table_mixin.py CHANGED
@@ -55,7 +55,7 @@ class LookupTable(LookupTableBase):
                  "This lookup table was not instantiated with tck metadata. \
                  Domain/Range information is unavailable."
              )
-         if type(t) == tuple and len(t) == 2:
+         if isinstance(t, tuple) and len(t) == 2:
              raise NotImplementedError(
                  "Domain/Range information is not yet implemented for 2D LookupTables"
              )
@@ -298,8 +298,9 @@ class CSVLookupTableMixin(OptimizationProblem):
          def check_lookup_table(lookup_table):
              if lookup_table in self.__lookup_tables:
                  raise Exception(
-                     "Cannot add lookup table {},"
-                     "since there is already one with this name.".format(lookup_table)
+                     "Cannot add lookup table {},since there is already one with this name.".format(
+                         lookup_table
+                     )
                  )

          # Read CSV files
@@ -358,6 +359,7 @@ class CSVLookupTableMixin(OptimizationProblem):
                          k=k,
                          monotonicity=mono,
                          curvature=curv,
+                         ipopt_options={"nlp_scaling_method": "none"},
                      )
                  else:
                      raise Exception(
rtctools/optimization/csv_mixin.py CHANGED
@@ -98,6 +98,9 @@ class CSVMixin(IOMixin):
                  names=True,
                  encoding=None,
              )
+             if len(self.__ensemble.shape) == 0:
+                 # If there is only one ensemble member, the array is 0-dimensional.
+                 self.__ensemble = np.expand_dims(self.__ensemble, 0)

              logger.debug("CSVMixin: Read ensemble description")

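
This covers the case where np.genfromtxt reads an ensemble description with a single data row and therefore returns a 0-dimensional structured array; a minimal sketch (the column names are made up, not the actual ensemble.csv layout):

    import io

    import numpy as np

    csv_data = io.StringIO("member,probability\n0,1.0\n")
    ensemble = np.genfromtxt(csv_data, delimiter=",", names=True, encoding=None)

    print(ensemble.shape)              # (): 0-dimensional for a single row
    ensemble = np.expand_dims(ensemble, 0)
    print(ensemble.shape)              # (1,)
    print(ensemble["probability"][0])  # 1.0
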
rtctools/optimization/goal_programming_mixin.py CHANGED
@@ -1,12 +1,13 @@
  import itertools
  import logging
  from collections import OrderedDict
- from typing import Dict, Union
+ from typing import Dict, Optional, Union

  import casadi as ca
  import numpy as np

  from rtctools._internal.alias_tools import AliasDict
+ from rtctools._internal.ensemble_bounds_decorator import ensemble_bounds_check

  from .goal_programming_mixin_base import (  # noqa: F401
      Goal,
@@ -65,8 +66,12 @@ class GoalProgrammingMixin(_GoalProgrammingMixinBase):
      def path_variables(self):
          return self.__problem_path_epsilons + self.__subproblem_path_epsilons

-     def bounds(self):
-         bounds = super().bounds()
+     @ensemble_bounds_check
+     def bounds(self, ensemble_member: Optional[int] = None):
+         bounds = (
+             super().bounds(ensemble_member) if self.ensemble_specific_bounds else super().bounds()
+         )
+
          for epsilon in (
              self.__subproblem_epsilons
              + self.__subproblem_path_epsilons
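
A sketch of what a user-defined bounds() override can look like once ensemble-specific bounds are enabled, mirroring how the mixins above decorate theirs; the class, state name, and values are made up:

    from typing import Optional

    from rtctools._internal.ensemble_bounds_decorator import ensemble_bounds_check
    from rtctools.optimization.goal_programming_mixin import GoalProgrammingMixin


    class ExampleProblem(GoalProgrammingMixin):
        ensemble_specific_bounds = True

        @ensemble_bounds_check
        def bounds(self, ensemble_member: Optional[int] = None):
            bounds = super().bounds(ensemble_member)
            # Tighter limit for all but the first ensemble member, for illustration.
            bounds["inflow"] = (0.0, 10.0 if ensemble_member == 0 else 8.0)
            return bounds
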
@@ -351,8 +356,9 @@ class GoalProgrammingMixin(_GoalProgrammingMixinBase):
              if goal.has_target_bounds:
                  # We use a violation variable formulation, with the violation
                  # variables epsilon bounded between 0 and 1.
-                 m, M = np.full_like(epsilon, -np.inf, dtype=np.float64), np.full_like(
-                     epsilon, np.inf, dtype=np.float64
+                 m, M = (
+                     np.full_like(epsilon, -np.inf, dtype=np.float64),
+                     np.full_like(epsilon, np.inf, dtype=np.float64),
                  )

                  # A function range does not have to be specified for critical
rtctools/optimization/goal_programming_mixin_base.py CHANGED
@@ -394,7 +394,31 @@ class StateGoal(Goal):
          # Extract state range from model
          if self.has_target_bounds:
              try:
-                 self.function_range = optimization_problem.bounds()[self.state]
+                 if optimization_problem.ensemble_specific_bounds:
+                     bounds = optimization_problem.bounds(0)
+                     bounds_state_ref = bounds[self.state]
+                     if np.array_equal(self.function_range, (np.nan, np.nan), equal_nan=True):
+                         # If the user has not set the function range themselves, we
+                         # try and set it automatically. This is only possible if the
+                         # bounds are the same for all ensemble members.
+                         for ensemble_member in range(optimization_problem.ensemble_size):
+                             bounds_state_ensemble = optimization_problem.bounds(ensemble_member)[
+                                 self.state
+                             ]
+                             # First, check if the types are equal, and then check if the values are
+                             # equal. For Timeseries and floats, we can do `==` comparison, for
+                             # arrays we need to use np.all. To simplify we wrap the `==` for floats
+                             # in an `np.all` as well.
+                             if type(bounds_state_ref) is not type(
+                                 bounds_state_ensemble
+                             ) or not np.all(bounds_state_ref == bounds_state_ensemble):
+                                 raise ValueError(
+                                     f"Bounds for state {self.state} are not the same for all "
+                                     f"ensemble members; please set the function_range explicitly"
+                                 )
+                 else:
+                     bounds = optimization_problem.bounds()
+                 self.function_range = bounds[self.state]
              except KeyError:
                  raise Exception(
                      "State {} has no bounds or does not exist in the model.".format(self.state)
@@ -437,7 +461,7 @@ class _GoalConstraint:
      ):
          assert isinstance(m, (float, np.ndarray, Timeseries))
          assert isinstance(M, (float, np.ndarray, Timeseries))
-         assert type(m) == type(M)
+         assert type(m) is type(M)

          # NumPy arrays only allowed for vector goals
          if isinstance(m, np.ndarray):
@@ -982,8 +1006,9 @@ class _GoalProgrammingMixinBase(OptimizationProblem, metaclass=ABCMeta):
              if goal.has_target_bounds:
                  # We use a violation variable formulation, with the violation
                  # variables epsilon bounded between 0 and 1.
-                 m, M = np.full_like(epsilon, -np.inf, dtype=np.float64), np.full_like(
-                     epsilon, np.inf, dtype=np.float64
+                 m, M = (
+                     np.full_like(epsilon, -np.inf, dtype=np.float64),
+                     np.full_like(epsilon, np.inf, dtype=np.float64),
                  )

                  # A function range does not have to be specified for critical
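
In practice the StateGoal change above means that, with ensemble-specific bounds enabled, a goal on a state whose bounds differ between members needs an explicit function range; a sketch with made-up names and values:

    from rtctools.optimization.goal_programming_mixin_base import StateGoal


    class TargetVolumeGoal(StateGoal):
        # Made-up state and numbers, for illustration only.
        state = "storage.volume"
        # An explicit function_range skips the per-member equality check.
        function_range = (0.0, 5.0e6)
        target_min = 1.0e6
        priority = 1
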
rtctools/optimization/io_mixin.py CHANGED
@@ -2,11 +2,13 @@ import bisect
  import logging
  import warnings
  from abc import ABCMeta, abstractmethod
+ from typing import Optional

  import casadi as ca
  import numpy as np

  from rtctools._internal.caching import cached
+ from rtctools._internal.ensemble_bounds_decorator import ensemble_bounds_check
  from rtctools.optimization.optimization_problem import OptimizationProblem
  from rtctools.optimization.timeseries import Timeseries

@@ -174,9 +176,13 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):
          return "_".join((variable, "Max"))

      @cached
-     def bounds(self):
-         # Call parent class first for default values.
-         bounds = super().bounds()
+     @ensemble_bounds_check
+     def bounds(self, ensemble_member: Optional[int] = None):
+         bounds = (
+             super().bounds(ensemble_member) if self.ensemble_specific_bounds else super().bounds()
+         )
+
+         ensemble_member = ensemble_member if self.ensemble_specific_bounds else 0

          io_times = self.io.times_sec
          t_pos = bisect.bisect_left(io_times, self.initial_time)
@@ -189,7 +195,7 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):

              timeseries_id = self.min_timeseries_id(variable_name)
              try:
-                 _, values = self.io.get_timeseries_sec(timeseries_id, 0)
+                 _, values = self.io.get_timeseries_sec(timeseries_id, ensemble_member)
                  m = values[t_pos:]
              except KeyError:
                  pass
@@ -199,7 +205,7 @@ class IOMixin(OptimizationProblem, metaclass=ABCMeta):

              timeseries_id = self.max_timeseries_id(variable_name)
              try:
-                 _, values = self.io.get_timeseries_sec(timeseries_id, 0)
+                 _, values = self.io.get_timeseries_sec(timeseries_id, ensemble_member)
                  M = values[t_pos:]
              except KeyError:
                  pass