rtc-tools 2.6.1__py3-none-any.whl → 2.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {rtc_tools-2.6.1.dist-info → rtc_tools-2.7.0.dist-info}/METADATA +26 -16
- rtc_tools-2.7.0.dist-info/RECORD +50 -0
- {rtc_tools-2.6.1.dist-info → rtc_tools-2.7.0.dist-info}/WHEEL +1 -1
- {rtc_tools-2.6.1.dist-info → rtc_tools-2.7.0.dist-info}/entry_points.txt +0 -1
- rtctools/_internal/casadi_helpers.py +5 -5
- rtctools/_version.py +4 -4
- rtctools/data/csv.py +18 -7
- rtctools/data/interpolation/bspline1d.py +5 -1
- rtctools/data/netcdf.py +16 -15
- rtctools/data/pi.py +61 -27
- rtctools/data/rtc.py +3 -3
- rtctools/optimization/collocated_integrated_optimization_problem.py +14 -17
- rtctools/optimization/control_tree_mixin.py +8 -5
- rtctools/optimization/csv_lookup_table_mixin.py +5 -3
- rtctools/optimization/csv_mixin.py +3 -0
- rtctools/optimization/goal_programming_mixin.py +11 -2
- rtctools/optimization/goal_programming_mixin_base.py +5 -3
- rtctools/optimization/modelica_mixin.py +28 -8
- rtctools/optimization/optimization_problem.py +18 -0
- rtctools/optimization/pi_mixin.py +13 -0
- rtctools/rtctoolsapp.py +15 -13
- rtctools/simulation/io_mixin.py +1 -1
- rtctools/simulation/pi_mixin.py +13 -0
- rtctools/simulation/simulation_problem.py +130 -22
- rtctools/util.py +1 -0
- rtc_tools-2.6.1.dist-info/RECORD +0 -50
- {rtc_tools-2.6.1.dist-info → rtc_tools-2.7.0.dist-info/licenses}/COPYING.LESSER +0 -0
- {rtc_tools-2.6.1.dist-info → rtc_tools-2.7.0.dist-info}/top_level.txt +0 -0

{rtc_tools-2.6.1.dist-info → rtc_tools-2.7.0.dist-info}/METADATA CHANGED

@@ -1,12 +1,11 @@
- Metadata-Version: 2.
+ Metadata-Version: 2.4
  Name: rtc-tools
- Version: 2.
+ Version: 2.7.0
  Summary: Toolbox for control and optimization of water systems.
  Home-page: https://oss.deltares.nl/web/rtc-tools/home
+ Download-URL: http://github.com/deltares/rtc-tools/
  Author: Deltares
  Maintainer: Deltares
- License: UNKNOWN
- Download-URL: http://gitlab.com/deltares/rtc-tools/
  Platform: Windows
  Platform: Linux
  Platform: Mac OS-X
@@ -24,20 +23,31 @@ Classifier: Operating System :: Microsoft :: Windows
  Classifier: Operating System :: POSIX
  Classifier: Operating System :: Unix
  Classifier: Operating System :: MacOS
- Requires-Python: >=3.
+ Requires-Python: >=3.9
  License-File: COPYING.LESSER
- Requires-Dist: casadi
- Requires-Dist: numpy
- Requires-Dist: scipy
- Requires-Dist: pymoca
- Requires-Dist: rtc-tools-channel-flow
- Requires-Dist: defusedxml
-
- Requires-Dist: netCDF4 ; extra == 'all'
+ Requires-Dist: casadi!=3.6.6,<=3.7,>=3.6.3
+ Requires-Dist: numpy>=1.16.0
+ Requires-Dist: scipy>=1.0.0
+ Requires-Dist: pymoca==0.9.*,>=0.9.1
+ Requires-Dist: rtc-tools-channel-flow>=1.2.0
+ Requires-Dist: defusedxml>=0.7.0
+ Requires-Dist: importlib_metadata>=5.0.0; python_version < "3.10"
  Provides-Extra: netcdf
- Requires-Dist: netCDF4
+ Requires-Dist: netCDF4; extra == "netcdf"
+ Provides-Extra: all
+ Requires-Dist: netCDF4; extra == "all"
+ Dynamic: author
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: download-url
+ Dynamic: home-page
+ Dynamic: license-file
+ Dynamic: maintainer
+ Dynamic: platform
+ Dynamic: provides-extra
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary

  RTC-Tools is the Deltares toolbox for control and optimization of water systems.

-
-
rtc_tools-2.7.0.dist-info/RECORD ADDED

@@ -0,0 +1,50 @@
+ rtc_tools-2.7.0.dist-info/licenses/COPYING.LESSER,sha256=46mU2C5kSwOnkqkw9XQAJlhBL2JAf1_uCD8lVcXyMRg,7652
+ rtctools/__init__.py,sha256=91hvS2-ryd2Pvw0COpsUzTwJwSnTZ035REiej-1hNI4,107
+ rtctools/_version.py,sha256=GRY0UZKaQFpBKoUgttmG_1W-QGsYu3oGtZePebKhXj4,497
+ rtctools/rtctoolsapp.py,sha256=2RVZI4QQUg0yC6ii4lr50yx1blEfHBFsAgUjLR5pBkA,4336
+ rtctools/util.py,sha256=8IGva7xWcAH-9Xcr1LaxUpYoZjF6vbo1eqdNJ9pKgGA,9098
+ rtctools/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ rtctools/_internal/alias_tools.py,sha256=XuQSAhhFuVtwn0yrAObZWIKPsSF4j2axXRtEmitIFPs,5310
+ rtctools/_internal/caching.py,sha256=p4gqSL7kCI7Hff-KjMEP7mhJCQSiU_lYm2MR7E18gBM,905
+ rtctools/_internal/casadi_helpers.py,sha256=q8j5h9XXXkZMUgjg6wbkcFj1mcHi5_SdEi8SrkM---M,1457
+ rtctools/_internal/debug_check_helpers.py,sha256=UgQTEPw4PyR7MbYLewSSWQqTwQj7xr5yUBk820O9Kk4,1084
+ rtctools/data/__init__.py,sha256=EllgSmCdrlvQZSd1VilvjPaeYJGhY9ErPiQtedmuFoA,157
+ rtctools/data/csv.py,sha256=hEpoTH3nhZaAvRN4r-9-nYeAjaFiNDRoiZWg8GxM3yo,5539
+ rtctools/data/netcdf.py,sha256=tMs-zcSlOR0HhajUKJVbXGNoi3GeKCM3X4DjuW8FDo8,19130
+ rtctools/data/pi.py,sha256=D2r9gaYu6qMpgWRqiWpWPSPJXWgqCVV0bz6ewgM78mc,46701
+ rtctools/data/rtc.py,sha256=tYPOzZSFE02bAXX3lgcGR1saoQNIv6oWVWH8CS0dl5Q,9079
+ rtctools/data/storage.py,sha256=67J4BRTl0AMEzlKNZ8Xdpy_4cGtwx8Lo_tL2n0G4S9w,13206
+ rtctools/data/interpolation/__init__.py,sha256=GBubCIT5mFoSTV-lOk7cpwvZekNMEe5bvqSQJ9HE34M,73
+ rtctools/data/interpolation/bspline.py,sha256=qevB842XWCH3fWlWMBqKMy1mw37ust-0YtSnb9PKCEc,948
+ rtctools/data/interpolation/bspline1d.py,sha256=HAh7m5xLBuiFKzMzuYEqZX_GmCPChKjV7ynTS6iRZOc,6166
+ rtctools/data/interpolation/bspline2d.py,sha256=ScmX0fPDxbUVtj3pbUE0L7UJocqroD_6fUT-4cvdRMc,1693
+ rtctools/optimization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ rtctools/optimization/collocated_integrated_optimization_problem.py,sha256=gB1RgEsOSgb745NxSosDqjBEJZI_ULtjlHZcuCcRjO4,131267
+ rtctools/optimization/control_tree_mixin.py,sha256=ZMMH7Xy_qIVXeLDNtPdXQ8o_0ELRYVdM5QK2R8YulKU,9036
+ rtctools/optimization/csv_lookup_table_mixin.py,sha256=TUYAT-u-mzH6HLP0iJHnLBVqV5tWnhYAqDC4Aj17MJg,17399
+ rtctools/optimization/csv_mixin.py,sha256=_6iPVK_EJ8PxnukepzkhFtidceucsozRML_DDEycYik,12453
+ rtctools/optimization/goal_programming_mixin.py,sha256=vdnKnz1Ov3OFN-J9KQiiAwHbrLjWH6o_PeZz2YfLz6k,33320
+ rtctools/optimization/goal_programming_mixin_base.py,sha256=qJQQcJlJdio4GTcrKfuBi6Nho9u16pDuuprzK0LUyhA,43835
+ rtctools/optimization/homotopy_mixin.py,sha256=Kh0kMfxB-Fo1FBGW5tPOQk24Xx_Mmw_p0YuSQotdkMU,6905
+ rtctools/optimization/initial_state_estimation_mixin.py,sha256=74QYfG-VYYTNVg-kAnCG6QoY3_sUmaID0ideF7bPkkY,3116
+ rtctools/optimization/io_mixin.py,sha256=AsZQ7YOUcUbWoczmjTXaSje5MUEsPNbQyZBJ6qzSjzU,11821
+ rtctools/optimization/linearization_mixin.py,sha256=mG5S7uwvwDydw-eBPyQKnLyKoy08EBjQh25vu97afhY,1049
+ rtctools/optimization/linearized_order_goal_programming_mixin.py,sha256=LQ2qpYt0YGLpEoerif4FJ5wwzq16q--27bsRjcqIU5A,9087
+ rtctools/optimization/min_abs_goal_programming_mixin.py,sha256=WMOv9EF8cfDJgTunzXfI_cUmBSQK26u1HJB_9EAarfM,14031
+ rtctools/optimization/modelica_mixin.py,sha256=b_VsEcg_VsAnODnTQybrY0GbuZUNQ3uugQmML6FlklE,18037
+ rtctools/optimization/netcdf_mixin.py,sha256=-zkXh3sMYE50c3kHsrmUVGWMSFm-0cXQpGrCm0yn-Tc,7563
+ rtctools/optimization/optimization_problem.py,sha256=lTk4tUBEb1xy9eyNy7-w0D6L6HxSCJ0MyvxjzrYLsR4,44824
+ rtctools/optimization/pi_mixin.py,sha256=G_6RPlXO-IOjqYxNsMZGY4fmnfxVpwN-_T5Ka3rDwK4,11788
+ rtctools/optimization/planning_mixin.py,sha256=O_Y74X8xZmaNZR4iYOe7BR06s9hnmcapbuHYHQTBPPQ,724
+ rtctools/optimization/single_pass_goal_programming_mixin.py,sha256=Zb9szg3PGT2o6gkGsXluSfEaAswkw3TKfPQDzUrj_Y4,25784
+ rtctools/optimization/timeseries.py,sha256=nCrsGCJThBMh9lvngEpbBDa834_QvklVvkxJqwX4a1M,1734
+ rtctools/simulation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ rtctools/simulation/csv_mixin.py,sha256=rGDUFPsqGHmF0_dWdXeWzWzMpkPmwCNweTBVrwSh31g,6704
+ rtctools/simulation/io_mixin.py,sha256=WIKOQxr3fS-aNbgjet9iWoUayuD22zLIYmqlWEqxXHo,6215
+ rtctools/simulation/pi_mixin.py,sha256=_TU2DrK2MQqVsyrHBD9W4SDEuot9dYmgTDNiXkDAJfk,9833
+ rtctools/simulation/simulation_problem.py,sha256=v5Lk2x-yuVb5s7ne5fFgxONxGniLHTyTR0XRzYRl1fw,50005
+ rtc_tools-2.7.0.dist-info/METADATA,sha256=ULtELHwTloQVwwzBsmnqDlvFMj-SMQmNO2r3ATeJRC8,1772
+ rtc_tools-2.7.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ rtc_tools-2.7.0.dist-info/entry_points.txt,sha256=DVS8sWf3b9ph9h8srEr6zmQ7ZKGwblwgZgGPZg-jRNQ,150
+ rtc_tools-2.7.0.dist-info/top_level.txt,sha256=pnBrb58PFPd1kp1dqa-JHU7R55h3alDNJIJnF3Jf9Dw,9
+ rtc_tools-2.7.0.dist-info/RECORD,,
rtctools/_internal/casadi_helpers.py CHANGED

@@ -5,12 +5,12 @@ import casadi as ca
  logger = logging.getLogger("rtctools")


- def is_affine(
+ def is_affine(expr, symbols):
      try:
-         Af = ca.Function("f", [
-     except RuntimeError as
-         if "'eval_sx' not defined for" in str(
-             Af = ca.Function("f", [
+         Af = ca.Function("f", [symbols], [ca.jacobian(expr, symbols)]).expand()
+     except RuntimeError as error:
+         if "'eval_sx' not defined for" in str(error):
+             Af = ca.Function("f", [symbols], [ca.jacobian(expr, symbols)])
          else:
              raise
      return Af.sparsity_jac(0, 0).nnz() == 0
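
For orientation, a standalone usage sketch of the completed helper: is_affine reports whether an expression is affine in the given symbols by checking that the Jacobian of its Jacobian is structurally empty. The symbol names and expressions below are illustrative, not taken from the package.

    import casadi as ca
    from rtctools._internal.casadi_helpers import is_affine

    x = ca.MX.sym("x", 2)
    print(is_affine(3 * x[0] + 2 * x[1] + 1, x))  # True: the Jacobian w.r.t. x is constant
    print(is_affine(x[0] * x[1], x))              # False: the Jacobian still depends on x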
rtctools/_version.py CHANGED

@@ -1,5 +1,5 @@

- # This file was generated by 'versioneer.py' (0.
+ # This file was generated by 'versioneer.py' (0.29) from
  # revision-control system data, or from the parent directory name of an
  # unpacked source archive. Distribution tarballs contain a pre-generated copy
  # of this file.
@@ -8,11 +8,11 @@ import json

  version_json = '''
  {
-  "date": "
+  "date": "2025-05-23T14:56:50+0200",
   "dirty": false,
   "error": null,
-  "full-revisionid": "
-  "version": "2.
+  "full-revisionid": "16ab781cd81009655c779d5cc5b7c4aa3e73fa85",
+  "version": "2.7.0"
  }
  ''' # END VERSION_JSON

rtctools/data/csv.py CHANGED

@@ -1,6 +1,7 @@
  import logging
  import sys
  from datetime import datetime
+ from typing import Union

  import numpy as np

@@ -41,6 +42,21 @@ def _boolean_to_nan(data, fname):
      return data


+ def _string_to_datetime(string: Union[str, bytes]) -> datetime:
+     """Convert a string to a datetime object."""
+     if isinstance(string, bytes):
+         string = string.decode("utf-8")
+     return datetime.strptime(string, "%Y-%m-%d %H:%M:%S")
+
+
+ def _string_to_float(string: Union[str, bytes]) -> float:
+     """Convert a string to a float."""
+     if isinstance(string, bytes):
+         string = string.decode("utf-8")
+     string = string.replace(",", ".")
+     return float(string)
+
+
  def load(fname, delimiter=",", with_time=False):
      """
      Check delimiter of csv and read contents to an array. Assumes no date-time conversion needed.
@@ -53,7 +69,7 @@ def load(fname, delimiter=",", with_time=False):
      """
      c = {}
      if with_time:
-         c.update({0:
+         c.update({0: _string_to_datetime})

      # Check delimiter of csv file. If semicolon, check if decimal separator is
      # a comma.
@@ -73,12 +89,7 @@ def load(fname, delimiter=",", with_time=False):
      # If commas are used as decimal separator, we need additional
      # converters.
      if n_comma_decimal:
-         c.update(
-             {
-                 i + len(c): lambda str: float(str.decode("utf-8").replace(",", "."))
-                 for i in range(1 + n_semicolon - len(c))
-             }
-         )
+         c.update({i + len(c): _string_to_float for i in range(1 + n_semicolon - len(c))})

      # Read the csv file and convert to array
      try:
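
A small illustration of the two new converter helpers. They are module-private and imported here only for demonstration; the input values are made up.

    from rtctools.data.csv import _string_to_datetime, _string_to_float

    print(_string_to_float(b"3,14"))                   # 3.14 (comma decimal separator handled)
    print(_string_to_datetime("2024-01-01 12:00:00"))  # datetime.datetime(2024, 1, 1, 12, 0)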
rtctools/data/interpolation/bspline1d.py CHANGED

@@ -55,6 +55,7 @@ class BSpline1D(BSpline):
          epsilon=1e-7,
          delta=1e-4,
          interior_pts=None,
+         ipopt_options=None,
      ):
          """
          fit() returns a tck tuple like scipy.interpolate.splrep, but adjusts
@@ -153,7 +154,10 @@ class BSpline1D(BSpline):
          nlp = {"x": c, "f": f, "g": g}
          my_solver = "ipopt"
          solver = nlpsol(
-             "solver",
+             "solver",
+             my_solver,
+             nlp,
+             {"print_time": 0, "expand": True, "ipopt": ipopt_options},
          )
          sol = solver(lbg=lbg, ubg=ubg)
          stats = solver.stats()
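
To show how the new ipopt_options argument is forwarded, here is a minimal, self-contained nlpsol call in the same shape as the one above. The toy objective and the chosen option value are illustrative only; the option shown is the one CSVLookupTableMixin passes further down in this diff.

    import casadi as ca

    x = ca.SX.sym("x")
    nlp = {"x": x, "f": (x - 2) ** 2}
    ipopt_options = {"nlp_scaling_method": "none"}  # illustrative IPOPT sub-options
    solver = ca.nlpsol(
        "solver", "ipopt", nlp, {"print_time": 0, "expand": True, "ipopt": ipopt_options}
    )
    sol = solver(x0=0.0)
    print(float(sol["x"]))  # approximately 2.0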
rtctools/data/netcdf.py CHANGED

@@ -401,20 +401,21 @@ class ExportDataset:
          """
          assert len(set(variable_names)) == len(variable_names)

-         assert (
-
-         )
-         assert (
-
-         )
+         assert self.__time_dim is not None, (
+             "First call write_times to ensure the time dimension has been created."
+         )
+         assert self.__station_dim is not None, (
+             "First call write_station_data to ensure the station dimension has been created"
+         )
          assert (
              self.__station_id_to_index_mapping is not None
          )  # should also be created in write_station_data

          if ensemble_size > 1:
-             assert (
-
-
+             assert self.__ensemble_member_dim is not None, (
+                 "First call write_ensemble_data to ensure "
+                 "the realization dimension has been created"
+             )

          for variable_name in variable_names:
              self.__dataset.createVariable(
@@ -446,15 +447,15 @@ class ExportDataset:
          :param values: The values that are to be written to the file
          :param ensemble_size: the number of members in the ensemble
          """
-         assert (
-
-         )
+         assert self.__station_id_to_index_mapping is not None, (
+             "First call write_station_data and create_variables."
+         )

          station_index = self.__station_id_to_index_mapping[station_id]
          if ensemble_size > 1:
-             self.__dataset.variables[variable_name][
-
-
+             self.__dataset.variables[variable_name][:, station_index, ensemble_member_index] = (
+                 values
+             )
          else:
              self.__dataset.variables[variable_name][:, station_index] = values
rtctools/data/pi.py CHANGED

@@ -333,8 +333,11 @@ class ParameterConfig:

          parameters = group.findall("pi:parameter", ns)
          for parameter in parameters:
-             yield
-
+             yield (
+                 location_id,
+                 model_id,
+                 parameter.attrib["id"],
+                 self.__parse_parameter(parameter),
              )

@@ -369,8 +372,6 @@ class Timeseries:
          self.__folder = folder
          self.__basename = basename

-         self.__path_xml = os.path.join(self.__folder, basename + ".xml")
-
          self.__internal_dtype = np.float64
          self.__pi_dtype = np.float32

@@ -378,7 +379,7 @@ class Timeseries:
          if self.make_new_file:
              self.__reset_xml_tree()
          else:
-             self.__tree = DefusedElementTree.parse(self.
+             self.__tree = DefusedElementTree.parse(self.path)
          self.__xml_root = self.__tree.getroot()

          self.__values = [{}]

@@ -801,13 +802,20 @@ class Timeseries:
          # Add series to xml
          self.__xml_root.append(series)

-     def write(self):
+     def write(self, output_folder=None, output_filename=None) -> None:
          """
          Writes the time series data to disk.
+
+         :param output_folder: The folder in which the output file is located.
+             If None, the original folder is used.
+         :param output_filename: The name of the output file without extension.
+             If None, the original filename is used.
          """
+         xml_path = self.output_path(output_folder, output_filename)
+         binary_path = self.output_binary_path(output_folder, output_filename)

          if self.__binary:
-             f = io.open(
+             f = io.open(binary_path, "wb")

          if self.make_new_file:
              # Force reinitialization in case write() is called more than once

@@ -876,29 +884,26 @@ class Timeseries:
          events = series.findall("pi:event", ns)

          t = self.__start_datetime
-         for i in
+         for i, value in enumerate(values):
              if self.dt is None:
                  t = self.times[i]
-             # Set the date/time, so that any date/time steps that
-             # are wrong in the placeholder file are corrected.
-             events[i].set("date", t.strftime("%Y-%m-%d"))
-             events[i].set("time", t.strftime("%H:%M:%S"))

-
-
-
-
-
-
-
+             if i < len(events):
+                 event = events[i]
+             else:
+                 event = ET.Element("pi:event")
+                 series.append(event)
+
+             # Always set the date/time, so that any date/time steps
+             # that are wrong in the placeholder file are corrected.
              event.set("date", t.strftime("%Y-%m-%d"))
              event.set("time", t.strftime("%H:%M:%S"))
+
              if nans[i]:
                  event.set("value", miss_val)
              else:
-                 event.set("value", str(
-
+                 event.set("value", str(value))
+
              if self.dt:
                  t += self.dt

@@ -911,7 +916,7 @@ class Timeseries:
              f.close()

          self.format_xml_data()
-         self.__tree.write(
+         self.__tree.write(xml_path)

      def format_xml_data(self):
          """

@@ -1170,16 +1175,45 @@ class Timeseries:
          self.__end_datetime = end_datetime

      @property
-     def path(self):
-
+     def path(self) -> str:
+         """
+         The path to the original xml file.
+         """
+         return os.path.join(self.__folder, self.__basename + ".xml")

      @property
-     def binary_path(self):
+     def binary_path(self) -> str:
          """
-         The path
+         The path to the original binary data .bin file.
          """
          return os.path.join(self.__folder, self.__basename + ".bin")

+     def _output_path_without_extension(self, output_folder=None, output_filename=None) -> str:
+         """
+         Get the output path without file extension.
+         """
+         if output_folder is None:
+             output_folder = self.__folder
+         if output_filename is None:
+             output_filename = self.__basename
+         return os.path.join(output_folder, output_filename)
+
+     def output_path(self, output_folder=None, output_filename=None) -> str:
+         """
+         Get the path to the output xml file.
+
+         The optional arguments are the same as in :py:method:`write`.
+         """
+         return self._output_path_without_extension(output_folder, output_filename) + ".xml"
+
+     def output_binary_path(self, output_folder=None, output_filename=None) -> str:
+         """
+         Get the path to the output binary file.
+
+         The optional arguments are the same as in :py:method:`write`.
+         """
+         return self._output_path_without_extension(output_folder, output_filename) + ".bin"
+
      def items(self, ensemble_member=0):
          """
          Returns an iterator over all timeseries IDs and value arrays for the given
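
A hedged usage sketch of the extended write() and the new output-path helpers. It assumes `ts` is an already constructed rtctools.data.pi.Timeseries instance; the folder and file names are illustrative.

    # Assumes `ts` is an existing rtctools.data.pi.Timeseries.
    ts.write()  # original behaviour: write next to the input, as <folder>/<basename>.xml
    ts.write(output_folder="output", output_filename="timeseries_export")
    print(ts.output_path("output", "timeseries_export"))         # e.g. output/timeseries_export.xml
    print(ts.output_binary_path("output", "timeseries_export"))  # e.g. output/timeseries_export.bin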
rtctools/data/rtc.py CHANGED

@@ -60,9 +60,9 @@ class DataConfig:
              logger.error(message)
              raise Exception(message)
          else:
-             self.__location_parameter_ids[
-
-
+             self.__location_parameter_ids[internal_id] = (
+                 self.__pi_location_parameter_id(pi_timeseries, "fews")
+             )
          self.__variable_map[external_id] = internal_id

      for k in ["import", "export"]:
rtctools/optimization/collocated_integrated_optimization_problem.py CHANGED

@@ -898,11 +898,11 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              function_options,
          )

+         # Expand the residual function if possible.
          try:
              dae_residual_function_integrated = dae_residual_function_integrated.expand()
          except RuntimeError as e:
-
-             if "'eval_sx' not defined for External" in str(e):
+             if "'eval_sx' not defined for" in str(e):
                  pass
              else:
                  raise

@@ -933,13 +933,13 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              [dae_residual_collocated],
              function_options,
          )
+         # Expand the residual function if possible.
          try:
              self.__dae_residual_function_collocated = (
                  self.__dae_residual_function_collocated.expand()
              )
          except RuntimeError as e:
-
-             if "'eval_sx' not defined for External" in str(e):
+             if "'eval_sx' not defined for" in str(e):
                  pass
              else:
                  raise

@@ -1028,8 +1028,8 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              + len(self.dae_variables["constant_inputs"])
          ]
          constant_inputs_1 = accumulated_U[
-             2 * len(collocated_variables)
-
+             2 * len(collocated_variables) + len(self.dae_variables["constant_inputs"]) : 2
+             * len(collocated_variables)
              + 2 * len(self.dae_variables["constant_inputs"])
          ]

@@ -1803,9 +1803,9 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
          # Cast delay from DM to np.array
          delay = delay.toarray().flatten()

-         assert np.all(
-
-         )
+         assert np.all(np.isfinite(delay)), (
+             "Delay duration must be resolvable to real values at transcribe()"
+         )

          out_times = np.concatenate([history_times, collocation_times])
          out_values = ca.veccat(

@@ -2043,9 +2043,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
      def controls(self):
          return self.__controls

-     def _collint_get_lbx_ubx(self, count, indices):
-         bounds = self.bounds()
-
+     def _collint_get_lbx_ubx(self, bounds, count, indices):
          lbx = np.full(count, -np.inf, dtype=np.float64)
          ubx = np.full(count, np.inf, dtype=np.float64)

@@ -2210,7 +2208,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
          count = max(count, control_indices_stop)

          discrete = self._collint_get_discrete(count, indices)
-         lbx, ubx = self._collint_get_lbx_ubx(count, indices)
+         lbx, ubx = self._collint_get_lbx_ubx(bounds, count, indices)
          x0 = self._collint_get_x0(count, indices)

          # Return number of control variables

@@ -2326,7 +2324,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
              offset += 1

          discrete = self._collint_get_discrete(count, indices)
-         lbx, ubx = self._collint_get_lbx_ubx(count, indices)
+         lbx, ubx = self._collint_get_lbx_ubx(bounds, count, indices)
          x0 = self._collint_get_x0(count, indices)

          # Return number of state variables

@@ -2869,8 +2867,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
          # Check coefficient matrix
          logger.info(
-             "Sanity check on objective and constraints Jacobian matrix"
-             "/constant coefficients values"
+             "Sanity check on objective and constraints Jacobian matrix/constant coefficients values"
          )

          in_var = nlp["x"]

@@ -3113,7 +3110,7 @@ class CollocatedIntegratedOptimizationProblem(OptimizationProblem, metaclass=ABC
          variable_to_all_indices = {k: set(v) for k, v in indices[0].items()}
          for ensemble_indices in indices[1:]:
              for k, v in ensemble_indices.items():
-                 variable_to_all_indices[k] |= v
+                 variable_to_all_indices[k] |= set(v)

          if len(inds_up) > 0:
              exceedences = []
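
The set(v) cast in the last hunk is easy to miss. A two-line illustration of why it matters when the per-ensemble indices are plain lists or arrays rather than sets (values are made up):

    all_indices = {0, 1}
    new_indices = [1, 2, 3]           # e.g. indices of one ensemble member
    # all_indices |= new_indices      # TypeError: |= needs a set on the right-hand side
    all_indices |= set(new_indices)   # {0, 1, 2, 3}
    print(all_indices)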
rtctools/optimization/control_tree_mixin.py CHANGED

@@ -86,6 +86,11 @@ class ControlTreeMixin(OptimizationProblem):
          logger.debug("ControlTreeMixin: Branching times:")
          logger.debug(self.__branching_times)

+         # Avoid calling constant_inputs() many times
+         constant_inputs = [
+             self.constant_inputs(ensemble_member=i) for i in range(self.ensemble_size)
+         ]
+
          # Branches start at branching times, so that the tree looks like the following:
          #
          # *-----

@@ -122,18 +127,16 @@ class ControlTreeMixin(OptimizationProblem):
              for forecast_variable in options["forecast_variables"]:
                  # We assume the time stamps of the forecasts in all ensemble
                  # members to be identical
-                 timeseries =
+                 timeseries = constant_inputs[0][forecast_variable]
                  els = np.logical_and(
                      timeseries.times >= branching_time_0, timeseries.times < branching_time_1
                  )

                  # Compute distance between ensemble members
                  for i, member_i in enumerate(branches[current_branch]):
-                     timeseries_i =
+                     timeseries_i = constant_inputs[member_i][forecast_variable]
                      for j, member_j in enumerate(branches[current_branch]):
-                         timeseries_j =
-                             forecast_variable
-                         ]
+                         timeseries_j = constant_inputs[member_j][forecast_variable]
                          distances[i, j] += np.linalg.norm(
                              timeseries_i.values[els] - timeseries_j.values[els]
                          )
rtctools/optimization/csv_lookup_table_mixin.py CHANGED

@@ -55,7 +55,7 @@ class LookupTable(LookupTableBase):
                  "This lookup table was not instantiated with tck metadata. \
                  Domain/Range information is unavailable."
              )
-         if
+         if isinstance(t, tuple) and len(t) == 2:
              raise NotImplementedError(
                  "Domain/Range information is not yet implemented for 2D LookupTables"
              )

@@ -298,8 +298,9 @@ class CSVLookupTableMixin(OptimizationProblem):
          def check_lookup_table(lookup_table):
              if lookup_table in self.__lookup_tables:
                  raise Exception(
-                     "Cannot add lookup table {},"
-
+                     "Cannot add lookup table {},since there is already one with this name.".format(
+                         lookup_table
+                     )
                  )

          # Read CSV files

@@ -358,6 +359,7 @@ class CSVLookupTableMixin(OptimizationProblem):
                      k=k,
                      monotonicity=mono,
                      curvature=curv,
+                     ipopt_options={"nlp_scaling_method": "none"},
                  )
              else:
                  raise Exception(
rtctools/optimization/csv_mixin.py CHANGED

@@ -98,6 +98,9 @@ class CSVMixin(IOMixin):
              names=True,
              encoding=None,
          )
+         if len(self.__ensemble.shape) == 0:
+             # If there is only one ensemble member, the array is 0-dimensional.
+             self.__ensemble = np.expand_dims(self.__ensemble, 0)

          logger.debug("CSVMixin: Read ensemble description")

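The numpy corner case being fixed here, in isolation: with names=True, genfromtxt returns a 0-dimensional structured array when the ensemble CSV contains a single data row, so it cannot be iterated like a list of members until it is expanded. The column names below are illustrative.

    import numpy as np
    from io import StringIO

    csv_text = "member,probability\nforecast1,1.0"
    ensemble = np.genfromtxt(
        StringIO(csv_text), delimiter=",", dtype=None, names=True, encoding=None
    )
    print(ensemble.shape)  # () -- zero-dimensional
    if len(ensemble.shape) == 0:
        ensemble = np.expand_dims(ensemble, 0)
    print(ensemble.shape)  # (1,)
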
rtctools/optimization/goal_programming_mixin.py CHANGED

@@ -351,8 +351,9 @@ class GoalProgrammingMixin(_GoalProgrammingMixinBase):
          if goal.has_target_bounds:
              # We use a violation variable formulation, with the violation
              # variables epsilon bounded between 0 and 1.
-             m, M =
-                 epsilon, np.inf, dtype=np.float64
+             m, M = (
+                 np.full_like(epsilon, -np.inf, dtype=np.float64),
+                 np.full_like(epsilon, np.inf, dtype=np.float64),
              )

          # A function range does not have to be specified for critical

@@ -667,6 +668,7 @@ class GoalProgrammingMixin(_GoalProgrammingMixinBase):
          logger.info("Starting goal programming")

          success = False
+         self.skip_priority = False

          self.__constraint_store = [OrderedDict() for ensemble_member in range(self.ensemble_size)]
          self.__path_constraint_store = [

@@ -691,6 +693,13 @@ class GoalProgrammingMixin(_GoalProgrammingMixinBase):
              # Call the pre priority hook
              self.priority_started(priority)

+             if self.skip_priority:
+                 logger.info(
+                     "priority {} was removed in priority_started. No optimization problem "
+                     "is solved at this priority.".format(priority)
+                 )
+                 continue
+
              (
                  self.__subproblem_epsilons,
                  self.__subproblem_objectives,
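
A hedged sketch of how the new skip_priority flag is meant to be used from user code: the flag is reset in priority_started (see the base-class hunk below), and setting it inside an override makes the mixin skip the solve for that priority. The class name and the condition are illustrative.

    from rtctools.optimization.goal_programming_mixin import GoalProgrammingMixin

    class SkipOnePriorityMixin(GoalProgrammingMixin):
        def priority_started(self, priority: int) -> None:
            super().priority_started(priority)  # resets skip_priority to False
            if priority == 2:                   # illustrative condition
                self.skip_priority = True       # no subproblem is solved at this priority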
rtctools/optimization/goal_programming_mixin_base.py CHANGED

@@ -437,7 +437,7 @@ class _GoalConstraint:
      ):
          assert isinstance(m, (float, np.ndarray, Timeseries))
          assert isinstance(M, (float, np.ndarray, Timeseries))
-         assert type(m)
+         assert type(m) is type(M)

          # NumPy arrays only allowed for vector goals
          if isinstance(m, np.ndarray):

@@ -982,8 +982,9 @@ class _GoalProgrammingMixinBase(OptimizationProblem, metaclass=ABCMeta):
          if goal.has_target_bounds:
              # We use a violation variable formulation, with the violation
              # variables epsilon bounded between 0 and 1.
-             m, M =
-                 epsilon, np.inf, dtype=np.float64
+             m, M = (
+                 np.full_like(epsilon, -np.inf, dtype=np.float64),
+                 np.full_like(epsilon, np.inf, dtype=np.float64),
              )

          # A function range does not have to be specified for critical

@@ -1081,6 +1082,7 @@ class _GoalProgrammingMixinBase(OptimizationProblem, metaclass=ABCMeta):

          :param priority: The priority level that was started.
          """
+         self.skip_priority = False
          pass

      def priority_completed(self, priority: int) -> None:
rtctools/optimization/modelica_mixin.py CHANGED

@@ -1,10 +1,18 @@
+ import importlib.resources
  import itertools
  import logging
+ import sys
  from typing import Dict, Union

+ # Python 3.9's importlib.metadata does not support the "group" parameter to
+ # entry_points yet.
+ if sys.version_info < (3, 10):
+     import importlib_metadata
+ else:
+     from importlib import metadata as importlib_metadata
+
  import casadi as ca
  import numpy as np
- import pkg_resources
  import pymoca
  import pymoca.backends.casadi.api

@@ -48,9 +56,21 @@ class ModelicaMixin(OptimizationProblem):
          else:
              model_name = self.__class__.__name__

-
-
-
+         compiler_options = self.compiler_options()
+         logger.info(f"Loading/compiling model {model_name}.")
+         try:
+             self.__pymoca_model = pymoca.backends.casadi.api.transfer_model(
+                 kwargs["model_folder"], model_name, compiler_options
+             )
+         except (RuntimeError, ModuleNotFoundError) as error:
+             if not compiler_options.get("cache", False):
+                 raise error
+             compiler_options["cache"] = False
+             logger.warning(f"Loading model {model_name} using a cache file failed: {error}.")
+             logger.info(f"Compiling model {model_name}.")
+             self.__pymoca_model = pymoca.backends.casadi.api.transfer_model(
+                 kwargs["model_folder"], model_name, compiler_options
+             )

          # Extract the CasADi MX variables used in the model
          self.__mx = {}

@@ -162,9 +182,9 @@ class ModelicaMixin(OptimizationProblem):
          # Where imported model libraries are located.
          library_folders = self.modelica_library_folders.copy()

-         for ep in
+         for ep in importlib_metadata.entry_points(group="rtctools.libraries.modelica"):
              if ep.name == "library_folder":
-                 library_folders.append(
+                 library_folders.append(str(importlib.resources.files(ep.module).joinpath(ep.attr)))

          compiler_options["library_folders"] = library_folders

@@ -314,7 +334,7 @@ class ModelicaMixin(OptimizationProblem):
          try:
              (m, M) = bounds[sym_name]
          except KeyError:
-             if self.__python_types.get(sym_name, float)
+             if self.__python_types.get(sym_name, float) is bool:
                  (m, M) = (0, 1)
              else:
                  (m, M) = (-np.inf, np.inf)

@@ -388,7 +408,7 @@ class ModelicaMixin(OptimizationProblem):
          return seed

      def variable_is_discrete(self, variable):
-         return self.__python_types.get(variable, float)
+         return self.__python_types.get(variable, float) is not float

      @property
      @cached
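
The pkg_resources-to-importlib migration above boils down to the following standalone lookup, which mirrors the pattern used in the diff. The entry-point group is the one rtc-tools already defines; whether any library folders are found depends on which plugin packages are installed.

    import importlib.resources
    import sys

    if sys.version_info < (3, 10):
        import importlib_metadata  # backport, needed for the group= keyword on Python 3.9
    else:
        from importlib import metadata as importlib_metadata

    library_folders = []
    for ep in importlib_metadata.entry_points(group="rtctools.libraries.modelica"):
        if ep.name == "library_folder":
            # EntryPoint.module / .attr replace pkg_resources' module_name / attrs[0]
            library_folders.append(str(importlib.resources.files(ep.module).joinpath(ep.attr)))
    print(library_folders)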
rtctools/optimization/optimization_problem.py CHANGED

@@ -314,6 +314,24 @@ class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
          if log_level == logging.ERROR and not log_solver_failure_as_error:
              log_level = logging.INFO

+         if self.solver_options()["solver"].lower() == "knitro":
+             list_feas_flags = [
+                 "KN_RC_OPTIMAL_OR_SATISFACTORY",
+                 "KN_RC_ITER_LIMIT_FEAS",
+                 "KN_RC_NEAR_OPT",
+                 "KN_RC_FEAS_XTOL",
+                 "KN_RC_FEAS_NO_IMPROVE",
+                 "KN_RC_FEAS_FTOL",
+                 "KN_RC_TIME_LIMIT_FEAS",
+                 "KN_RC_FEVAL_LIMIT_FEAS",
+                 "KN_RC_MIP_EXH_FEAS",
+                 "KN_RC_MIP_TERM_FEAS",
+                 "KN_RC_MIP_SOLVE_LIMIT_FEAS",
+                 "KN_RC_MIP_NODE_LIMIT_FEAS",
+             ]
+             if solver_stats["return_status"] in list_feas_flags:
+                 success = True
+
          return success, log_level

      @abstractproperty
rtctools/optimization/pi_mixin.py CHANGED

@@ -277,3 +277,16 @@ class PIMixin(IOMixin):
          :class:`pi.Timeseries` object for holding the output data.
          """
          return self.__timeseries_export
+
+     def set_unit(self, variable: str, unit: str):
+         """
+         Set the unit of a time series.
+
+         :param variable: Time series ID.
+         :param unit: Unit.
+         """
+         assert hasattr(self, "_PIMixin__timeseries_import"), (
+             "set_unit can only be called after read() in pre() has finished."
+         )
+         self.__timeseries_import.set_unit(variable, unit, 0)
+         self.__timeseries_export.set_unit(variable, unit, 0)
rtctools/rtctoolsapp.py CHANGED

@@ -1,9 +1,17 @@
+ import importlib.resources
  import logging
  import os
  import shutil
  import sys
  from pathlib import Path

+ # Python 3.9's importlib.metadata does not support the "group" parameter to
+ # entry_points yet.
+ if sys.version_info < (3, 10):
+     import importlib_metadata
+ else:
+     from importlib import metadata as importlib_metadata
+
  import rtctools

  logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s")

@@ -23,9 +31,6 @@ def copy_libraries(*args):
      if not os.path.exists(path):
          sys.exit("Folder '{}' does not exist".format(path))

-     # pkg_resources can be quite a slow import, so we do it here
-     import pkg_resources
-
      def _copytree(src, dst, symlinks=False, ignore=None):
          if not os.path.exists(dst):
              os.makedirs(dst)

@@ -56,11 +61,10 @@ def copy_libraries(*args):
      dst = Path(path)

      library_folders = []
-
+
+     for ep in importlib_metadata.entry_points(group="rtctools.libraries.modelica"):
          if ep.name == "library_folder":
-             library_folders.append(
-                 Path(pkg_resources.resource_filename(ep.module_name, ep.attrs[0]))
-             )
+             library_folders.append(Path(importlib.resources.files(ep.module).joinpath(ep.attr)))

      tlds = {}
      for lf in library_folders:

@@ -100,11 +104,8 @@ def download_examples(*args):
      from zipfile import ZipFile

      version = rtctools.__version__
-     rtc_full_name = "rtc-tools-{}".format(version)
      try:
-         url = "https://
-             version, rtc_full_name
-         )
+         url = "https://github.com/deltares/rtc-tools/zipball/{}".format(version)

          opener = urllib.request.build_opener()
          urllib.request.install_opener(opener)

@@ -115,11 +116,12 @@ def download_examples(*args):

      with ZipFile(local_filename, "r") as z:
          target = path / "rtc-tools-examples"
-
+         zip_folder_name = next(x for x in z.namelist() if x.startswith("Deltares-rtc-tools-"))
+         prefix = "{}/examples/".format(zip_folder_name.rstrip("/"))
          members = [x for x in z.namelist() if x.startswith(prefix)]
          z.extractall(members=members)
          shutil.move(prefix, target)
-         shutil.rmtree(
+         shutil.rmtree(zip_folder_name)

      sys.exit("Succesfully downloaded the RTC-Tools examples to '{}'".format(target.resolve()))
rtctools/simulation/io_mixin.py CHANGED

@@ -94,7 +94,7 @@ class IOMixin(SimulationProblem, metaclass=ABCMeta):
          self.__cache_loop_timeseries = {}

          timeseries_names = set(self.io.get_timeseries_names(0))
-         for v in self.
+         for v in self.get_input_variables():
              if v in timeseries_names:
                  _, values = self.io.get_timeseries_sec(v)
                  self.__cache_loop_timeseries[v] = values
rtctools/simulation/pi_mixin.py CHANGED

@@ -240,3 +240,16 @@ class PIMixin(IOMixin):
      def get_timeseries(self, variable):
          _, values = self.io.get_timeseries(variable)
          return values
+
+     def set_unit(self, variable: str, unit: str):
+         """
+         Set the unit of a time series.
+
+         :param variable: Time series ID.
+         :param unit: Unit.
+         """
+         assert hasattr(self, "_PIMixin__timeseries_import"), (
+             "set_unit can only be called after read() in pre() has finished."
+         )
+         self.__timeseries_import.set_unit(variable, unit, 0)
+         self.__timeseries_export.set_unit(variable, unit, 0)
rtctools/simulation/simulation_problem.py CHANGED

@@ -1,13 +1,21 @@
  import copy
+ import importlib.resources
  import itertools
  import logging
  import math
+ import sys
  from collections import OrderedDict
  from typing import List, Union

+ # Python 3.9's importlib.metadata does not support the "group" parameter to
+ # entry_points yet.
+ if sys.version_info < (3, 10):
+     import importlib_metadata
+ else:
+     from importlib import metadata as importlib_metadata
+
  import casadi as ca
  import numpy as np
- import pkg_resources
  import pymoca
  import pymoca.backends.casadi.api

@@ -82,9 +90,21 @@ class SimulationProblem(DataStoreAccessor):
          model_name = self.__class__.__name__

          # Load model from pymoca backend
-
-
-
+         compiler_options = self.compiler_options()
+         logger.info(f"Loading/compiling model {model_name}.")
+         try:
+             self.__pymoca_model = pymoca.backends.casadi.api.transfer_model(
+                 kwargs["model_folder"], model_name, compiler_options
+             )
+         except RuntimeError as error:
+             if compiler_options.get("cache", False):
+                 raise error
+             compiler_options["cache"] = False
+             logger.warning(f"Loading model {model_name} using a cache file failed: {error}.")
+             logger.info(f"Compiling model {model_name}.")
+             self.__pymoca_model = pymoca.backends.casadi.api.transfer_model(
+                 kwargs["model_folder"], model_name, compiler_options
+             )

          # Extract the CasADi MX variables used in the model
          self.__mx = {}

@@ -319,6 +339,17 @@ class SimulationProblem(DataStoreAccessor):
          """
          Initialize state vector with default values

+         Initial values are first read from the given Modelica files.
+         If an initial value equals zero or is not provided by a Modelica file,
+         and the variable is not marked as fixed,
+         then the initial value is tried to be set with the initial_state method.
+         When using CSVMixin, this method by default looks for initial values
+         in an initial_state.csv file.
+         Furthermore, if a variable is not marked as fixed
+         and no initial value is given by the initial_state method,
+         the initial value can be overwritten using the seed method.
+         When a variable is marked as fixed, the initial value is only read from the Modelica file.
+
          :param config_file: Path to an initialization file.
          """
          if config_file:

@@ -393,29 +424,33 @@ class SimulationProblem(DataStoreAccessor):
          for var in itertools.chain(self.__pymoca_model.states, self.__pymoca_model.alg_states):
              var_name = var.symbol.name()
              var_nominal = self.get_variable_nominal(var_name)
+             start_values = {}

              # Attempt to cast var.start to python type
              mx_start = ca.MX(var.start)
              if mx_start.is_constant():
                  # cast var.start to python type
-
+                 start_value_pymoca = var.python_type(mx_start.to_DM())
+                 if start_value_pymoca is not None and start_value_pymoca != 0:
+                     start_values["modelica"] = start_value_pymoca
              else:
-
-                 start_val = None
+                 start_values["modelica"] = mx_start

-             if
+             if not var.fixed:
                  # To make initialization easier, we allow setting initial states by providing
                  # timeseries with names that match a symbol in the model. We only check for this
                  # matching if the start and fixed attributes were left as default
                  try:
-
+                     start_values["initial_state"] = self.initial_state()[var_name]
                  except KeyError:
                      pass
                  else:
                      # An initial state was found- add it to the constrained residuals
                      logger.debug(
                          "Initialize: Added {} = {} to initial equations "
-                         "(found matching timeseries).".format(
+                         "(found matching timeseries).".format(
+                             var_name, start_values["initial_state"]
+                         )
                      )
                      # Set var to be fixed
                      var.fixed = True

@@ -425,36 +460,82 @@ class SimulationProblem(DataStoreAccessor):
              # timeseries with names that match a symbol in the model. We only check for this
              # matching if the start and fixed attributes were left as default
              try:
-
+                 start_values["seed"] = self.seed()[var_name]
              except KeyError:
                  pass
              else:
                  # An initial state was found- add it to the constrained residuals
                  logger.debug(
                      "Initialize: Added {} = {} as initial guess "
-                     "(found matching timeseries).".format(var_name,
+                     "(found matching timeseries).".format(var_name, start_values["seed"])
+                 )
+
+             # Set the start value based on the different inputs.
+             if "seed" in start_values:
+                 input_source = "seed"
+                 source_description = "seed method"
+             elif "modelica" in start_values:
+                 input_source = "modelica"
+                 source_description = "modelica file"
+             elif "initial_state" in start_values:
+                 input_source = "initial_state"
+                 source_description = "initial_state method (typically reads initial_state.csv)"
+             else:
+                 start_values["modelica"] = start_value_pymoca
+                 input_source = "modelica"
+                 source_description = "modelica file or default value"
+             start_val = start_values.get(input_source, None)
+             start_is_numeric = start_val is not None and not isinstance(start_val, ca.MX)
+             numeric_start_val = start_val if start_is_numeric else 0.0
+             if len(start_values) > 1:
+                 logger.warning(
+                     "Initialize: Multiple initial values for {} are provided: {}.".format(
+                         var_name, start_values
                      )
+                     + " Value from {} will be used to continue.".format(source_description)
+                 )

              # Attempt to set start_val in the state vector. Default to zero if unknown.
              try:
-                 self.set_var(var_name,
+                 self.set_var(var_name, numeric_start_val)
              except KeyError:
                  logger.warning(
-                     "Initialize: {} not found in state vector. "
-
+                     "Initialize: {} not found in state vector. Initial value of {} not set.".format(
+                         var_name, numeric_start_val
+                     )
                  )

              # Add a residual for the difference between the state and its starting expression
-             start_expr = start_val
+             start_expr = start_val
+             min_is_symbolic = isinstance(var.min, ca.MX)
+             max_is_symbolic = isinstance(var.max, ca.MX)
              if var.fixed:
                  # Set bounds to be equal to each other, such that IPOPT can
                  # turn the decision variable into a parameter.
+                 if min_is_symbolic or max_is_symbolic or var.min != -np.inf or var.max != np.inf:
+                     logger.info(
+                         "Initialize: bounds of {} will be overwritten".format(var_name)
+                         + " by the start value given by {}.".format(source_description)
+                     )
                  var.min = start_expr
                  var.max = start_expr
              else:
                  # minimize residual
                  minimized_residuals.append((var.symbol - start_expr) / var_nominal)

+             # Check that the start_value is in between the variable bounds.
+             if start_is_numeric and not min_is_symbolic and not max_is_symbolic:
+                 if not (var.min <= start_val and start_val <= var.max):
+                     logger.log(
+                         (
+                             logging.WARNING
+                             if source_description != "modelica file or default value"
+                             else logging.DEBUG
+                         ),
+                         f"Initialize: start value {var_name} = {start_val} "
+                         f"is not in between bounds {var.min} and {var.max} and will be adjusted.",
+                     )
+
          # Default start var for ders is zero
          for der_var in self.__mx["derivatives"]:
              self.set_var(der_var.name(), 0.0)

@@ -610,7 +691,15 @@ class SimulationProblem(DataStoreAccessor):
          # If unsuccessful, stop.
          return_status = solver.stats()["return_status"]
          if return_status not in {"Solve_Succeeded", "Solved_To_Acceptable_Level"}:
-
+             if return_status == "Infeasible_Problem_Detected":
+                 message = (
+                     "Initialization Failed with return status: {}. ".format(return_status)
+                     + "This means no initial state could be found "
+                     + "that satisfies all equations and constraints."
+                 )
+             else:
+                 message = "Initialization Failed with return status: {}. ".format(return_status)
+             raise Exception(message)

          # Update state vector with initial conditions
          self.__state_vector[: self.__n_states] = initial_state["x"][: self.__n_states].T

@@ -762,6 +851,8 @@ class SimulationProblem(DataStoreAccessor):
          self.set_var("time", self.get_current_time() + dt)

          # take a step
+         if np.isnan(self.__state_vector).any():
+             logger.error("Found a nan in the state vector (before making the step)")
          guess = self.__state_vector[: self.__n_states]
          if len(self.__mx["parameters"]) > 0:
              next_state = self.__do_step(

@@ -769,6 +860,23 @@ class SimulationProblem(DataStoreAccessor):
              )
          else:
              next_state = self.__do_step(guess, dt, self.__state_vector)
+
+         try:
+             if np.isnan(next_state).any():
+                 index_to_name = {index[0]: name for name, index in self.__indices.items()}
+                 named_next_state = {
+                     index_to_name[i]: float(next_state[i]) for i in range(0, next_state.shape[0])
+                 }
+                 variables_with_nan = [
+                     name for name, value in named_next_state.items() if np.isnan(value)
+                 ]
+                 if variables_with_nan:
+                     logger.error(
+                         f"Found nan(s) in the next_state vector for:\n\t {variables_with_nan}"
+                     )
+         except (KeyError, IndexError, TypeError):
+             logger.warning("Something went wrong while checking for nans in the next_state vector")
+
          # Check convergence of rootfinder
          rootfinder_stats = self.__do_step.stats()

@@ -964,9 +1072,9 @@ class SimulationProblem(DataStoreAccessor):
          :param dt: Timestep size of the simulation.
          """
          if self._dt_is_fixed:
-             assert math.isclose(
-
-             )
+             assert math.isclose(self.__dt, dt), (
+                 "Timestep size dt is marked as constant and cannot be changed."
+             )
          else:
              self.__dt = dt

@@ -1149,9 +1257,9 @@ class SimulationProblem(DataStoreAccessor):
          # Where imported model libraries are located.
          library_folders = self.modelica_library_folders.copy()

-         for ep in
+         for ep in importlib_metadata.entry_points(group="rtctools.libraries.modelica"):
              if ep.name == "library_folder":
-                 library_folders.append(
+                 library_folders.append(str(importlib.resources.files(ep.module).joinpath(ep.attr)))

          compiler_options["library_folders"] = library_folders

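The nan reporting added to step() can be reproduced in isolation. The name-to-index mapping and values below are made up, but the inversion of the mapping and the filtering are the same as in the hunk above.

    import numpy as np

    indices = {"reservoir.V": (0,), "reservoir.Q_out": (1,)}  # illustrative name -> index map
    next_state = np.array([1.0e6, np.nan])

    index_to_name = {index[0]: name for name, index in indices.items()}
    named_next_state = {index_to_name[i]: float(next_state[i]) for i in range(next_state.shape[0])}
    variables_with_nan = [name for name, value in named_next_state.items() if np.isnan(value)]
    print(variables_with_nan)  # ['reservoir.Q_out']
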
rtctools/util.py CHANGED
rtc_tools-2.6.1.dist-info/RECORD DELETED

@@ -1,50 +0,0 @@
- rtctools/__init__.py,sha256=91hvS2-ryd2Pvw0COpsUzTwJwSnTZ035REiej-1hNI4,107
- rtctools/_version.py,sha256=2OZVO8ezWtCbihHdmc9B5Bt65ImW3guaiFSxxQKyAhA,497
- rtctools/rtctoolsapp.py,sha256=UnkuiJhv0crEEVs8H6PYvMuc2y_q6V_xLuyKEgXj9GM,4200
- rtctools/util.py,sha256=PaeKfDUA174ODZbY5fZjCTf-F-TdhW7yEuP189Ro190,9075
- rtctools/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rtctools/_internal/alias_tools.py,sha256=XuQSAhhFuVtwn0yrAObZWIKPsSF4j2axXRtEmitIFPs,5310
- rtctools/_internal/caching.py,sha256=p4gqSL7kCI7Hff-KjMEP7mhJCQSiU_lYm2MR7E18gBM,905
- rtctools/_internal/casadi_helpers.py,sha256=oAf5zyFkZbaMhvhgMnQkOi2A6dBOzj-VAUkYwBf-Jxk,1410
- rtctools/_internal/debug_check_helpers.py,sha256=UgQTEPw4PyR7MbYLewSSWQqTwQj7xr5yUBk820O9Kk4,1084
- rtctools/data/__init__.py,sha256=EllgSmCdrlvQZSd1VilvjPaeYJGhY9ErPiQtedmuFoA,157
- rtctools/data/csv.py,sha256=iYOEED3AaNxt4ews_aAkHfl9Tq9a-9vjxvYwjVR_lQE,5231
- rtctools/data/netcdf.py,sha256=xpk4Ggl7gItNG6lO7p3OJPR-elK8_CiCtxUI7cX0gwk,19109
- rtctools/data/pi.py,sha256=Ni1hBDdhQdcWYO-NUPhKA1WJdzbSXjZc5w5xauOBcJM,45437
- rtctools/data/rtc.py,sha256=1yGJZGq2Z36MYLiLuZaHnxupL4mgw-Wuu54PAG05kcM,9077
- rtctools/data/storage.py,sha256=67J4BRTl0AMEzlKNZ8Xdpy_4cGtwx8Lo_tL2n0G4S9w,13206
- rtctools/data/interpolation/__init__.py,sha256=GBubCIT5mFoSTV-lOk7cpwvZekNMEe5bvqSQJ9HE34M,73
- rtctools/data/interpolation/bspline.py,sha256=qevB842XWCH3fWlWMBqKMy1mw37ust-0YtSnb9PKCEc,948
- rtctools/data/interpolation/bspline1d.py,sha256=hQrok4rrBcJV_HciuFjZYSwrSP8w_VufQRP6JLZhA7U,6106
- rtctools/data/interpolation/bspline2d.py,sha256=ScmX0fPDxbUVtj3pbUE0L7UJocqroD_6fUT-4cvdRMc,1693
- rtctools/optimization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rtctools/optimization/collocated_integrated_optimization_problem.py,sha256=Bv1a1O4QTAdIpe9SOA9_oEDDph-oj-9q5yfN4SFMdW8,131343
- rtctools/optimization/control_tree_mixin.py,sha256=CC6TWp3kFQgMokx6213pRLx9iY17Fd0VcwG4Wpwa0Uo,8974
- rtctools/optimization/csv_lookup_table_mixin.py,sha256=h4WKuPR1BJbYuJfQ9lx4rXalB6yYK-zajINabRL7BrA,17298
- rtctools/optimization/csv_mixin.py,sha256=sRp5paHWfCw2bz-23Nw-HdFLS3CZTpVwaBdFo98DbvE,12252
- rtctools/optimization/goal_programming_mixin.py,sha256=GK25DrbAY_rMsra080pSCDZwzLQNN2Ppd-2d0_FEllg,32999
- rtctools/optimization/goal_programming_mixin_base.py,sha256=oh9CsEiyYTmthcfvRbX-9Z9bIo6SHv_DCiVt9kx0sjI,43781
- rtctools/optimization/homotopy_mixin.py,sha256=Kh0kMfxB-Fo1FBGW5tPOQk24Xx_Mmw_p0YuSQotdkMU,6905
- rtctools/optimization/initial_state_estimation_mixin.py,sha256=74QYfG-VYYTNVg-kAnCG6QoY3_sUmaID0ideF7bPkkY,3116
- rtctools/optimization/io_mixin.py,sha256=AsZQ7YOUcUbWoczmjTXaSje5MUEsPNbQyZBJ6qzSjzU,11821
- rtctools/optimization/linearization_mixin.py,sha256=mG5S7uwvwDydw-eBPyQKnLyKoy08EBjQh25vu97afhY,1049
- rtctools/optimization/linearized_order_goal_programming_mixin.py,sha256=LQ2qpYt0YGLpEoerif4FJ5wwzq16q--27bsRjcqIU5A,9087
- rtctools/optimization/min_abs_goal_programming_mixin.py,sha256=WMOv9EF8cfDJgTunzXfI_cUmBSQK26u1HJB_9EAarfM,14031
- rtctools/optimization/modelica_mixin.py,sha256=ysVMayNA4sSFoHkSdhjWOxT6UzOVbN0ZeM4v-RpvZXE,17161
- rtctools/optimization/netcdf_mixin.py,sha256=-zkXh3sMYE50c3kHsrmUVGWMSFm-0cXQpGrCm0yn-Tc,7563
- rtctools/optimization/optimization_problem.py,sha256=qzpc81NaZMeoXKuayFmBF15iXYuNAk5yxmaER_Gcz_A,44131
- rtctools/optimization/pi_mixin.py,sha256=63qda6i7hjtDuP3hL6RO29vCCP11aUpR9B4KoqlLFVI,11314
- rtctools/optimization/planning_mixin.py,sha256=O_Y74X8xZmaNZR4iYOe7BR06s9hnmcapbuHYHQTBPPQ,724
- rtctools/optimization/single_pass_goal_programming_mixin.py,sha256=Zb9szg3PGT2o6gkGsXluSfEaAswkw3TKfPQDzUrj_Y4,25784
- rtctools/optimization/timeseries.py,sha256=nCrsGCJThBMh9lvngEpbBDa834_QvklVvkxJqwX4a1M,1734
- rtctools/simulation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rtctools/simulation/csv_mixin.py,sha256=rGDUFPsqGHmF0_dWdXeWzWzMpkPmwCNweTBVrwSh31g,6704
- rtctools/simulation/io_mixin.py,sha256=SJasNGI--OQ9Y-Z61oeeaGCxSrNddYz4AOVfJYbmf74,6209
- rtctools/simulation/pi_mixin.py,sha256=uwl61LYjb8dmMz910EB2-bC0KSuhLzsrJzk0hxWYEhk,9359
- rtctools/simulation/simulation_problem.py,sha256=gTAimG2MLw_TTkeHLkIMxpYgAmR-voqzvje7pcFnw4U,44556
- rtc_tools-2.6.1.dist-info/COPYING.LESSER,sha256=46mU2C5kSwOnkqkw9XQAJlhBL2JAf1_uCD8lVcXyMRg,7652
- rtc_tools-2.6.1.dist-info/METADATA,sha256=xRGaIE4XUCeH33cpMwv5LI_EbIqlYyoOChU6Epg8VAA,1492
- rtc_tools-2.6.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- rtc_tools-2.6.1.dist-info/entry_points.txt,sha256=-x622IB_l1duw2M6t6syfQ4yzOiQTp0IZxKGcYRgWgk,151
- rtc_tools-2.6.1.dist-info/top_level.txt,sha256=pnBrb58PFPd1kp1dqa-JHU7R55h3alDNJIJnF3Jf9Dw,9
- rtc_tools-2.6.1.dist-info/RECORD,,
{rtc_tools-2.6.1.dist-info → rtc_tools-2.7.0.dist-info/licenses}/COPYING.LESSER (file without changes)

{rtc_tools-2.6.1.dist-info → rtc_tools-2.7.0.dist-info}/top_level.txt (file without changes)