floodmodeller-api 0.5.0.post1__py3-none-any.whl → 0.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- floodmodeller_api/__init__.py +11 -1
- floodmodeller_api/_base.py +55 -36
- floodmodeller_api/backup.py +15 -12
- floodmodeller_api/dat.py +191 -121
- floodmodeller_api/diff.py +4 -4
- floodmodeller_api/hydrology_plus/hydrology_plus_export.py +15 -14
- floodmodeller_api/ied.py +8 -10
- floodmodeller_api/ief.py +56 -42
- floodmodeller_api/ief_flags.py +1 -1
- floodmodeller_api/inp.py +7 -10
- floodmodeller_api/logs/lf.py +25 -26
- floodmodeller_api/logs/lf_helpers.py +20 -20
- floodmodeller_api/logs/lf_params.py +1 -5
- floodmodeller_api/mapping.py +11 -2
- floodmodeller_api/test/__init__.py +2 -2
- floodmodeller_api/test/conftest.py +2 -3
- floodmodeller_api/test/test_backup.py +2 -2
- floodmodeller_api/test/test_conveyance.py +13 -7
- floodmodeller_api/test/test_dat.py +168 -20
- floodmodeller_api/test/test_data/EX18_DAT_expected.json +164 -144
- floodmodeller_api/test/test_data/EX3_DAT_expected.json +6 -2
- floodmodeller_api/test/test_data/EX6_DAT_expected.json +12 -46
- floodmodeller_api/test/test_data/encoding_test_cp1252.dat +1081 -0
- floodmodeller_api/test/test_data/encoding_test_utf8.dat +1081 -0
- floodmodeller_api/test/test_data/integrated_bridge/AR_NoSP_NoBl_2O_NO_OneFRC.ied +33 -0
- floodmodeller_api/test/test_data/integrated_bridge/AR_vSP_25pc_1O.ied +32 -0
- floodmodeller_api/test/test_data/integrated_bridge/PL_vSP_25pc_1O.ied +34 -0
- floodmodeller_api/test/test_data/integrated_bridge/SBTwoFRCsStaggered.IED +32 -0
- floodmodeller_api/test/test_data/integrated_bridge/US_NoSP_NoBl_OR_RN.ied +28 -0
- floodmodeller_api/test/test_data/integrated_bridge/US_SP_NoBl_OR_frc_PT2-5_RN.ied +34 -0
- floodmodeller_api/test/test_data/integrated_bridge/US_fSP_NoBl_1O.ied +30 -0
- floodmodeller_api/test/test_data/integrated_bridge/US_nSP_NoBl_1O.ied +49 -0
- floodmodeller_api/test/test_data/integrated_bridge/US_vSP_NoBl_2O_Para.ied +35 -0
- floodmodeller_api/test/test_data/integrated_bridge.dat +40 -0
- floodmodeller_api/test/test_data/network.ied +2 -2
- floodmodeller_api/test/test_data/network_dat_expected.json +141 -243
- floodmodeller_api/test/test_data/network_ied_expected.json +2 -2
- floodmodeller_api/test/test_data/network_with_comments.ied +2 -2
- floodmodeller_api/test/test_data/structure_logs/EX17_expected.csv +4 -0
- floodmodeller_api/test/test_data/structure_logs/EX17_expected.json +69 -0
- floodmodeller_api/test/test_data/structure_logs/EX18_expected.csv +20 -0
- floodmodeller_api/test/test_data/structure_logs/EX18_expected.json +292 -0
- floodmodeller_api/test/test_data/structure_logs/EX6_expected.csv +4 -0
- floodmodeller_api/test/test_data/structure_logs/EX6_expected.json +35 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_flow.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_fr.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_mode.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_stage.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_state.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_velocity.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_h.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_mode.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_link_inflow.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_max.csv +87 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_h.csv +182 -0
- floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_mode.csv +182 -0
- floodmodeller_api/test/test_flowtimeprofile.py +2 -2
- floodmodeller_api/test/test_hydrology_plus_export.py +4 -2
- floodmodeller_api/test/test_ied.py +3 -3
- floodmodeller_api/test/test_ief.py +12 -4
- floodmodeller_api/test/test_inp.py +2 -2
- floodmodeller_api/test/test_integrated_bridge.py +159 -0
- floodmodeller_api/test/test_json.py +14 -13
- floodmodeller_api/test/test_logs_lf.py +50 -29
- floodmodeller_api/test/test_read_file.py +1 -0
- floodmodeller_api/test/test_river.py +12 -12
- floodmodeller_api/test/test_tool.py +8 -5
- floodmodeller_api/test/test_toolbox_structure_log.py +148 -158
- floodmodeller_api/test/test_xml2d.py +14 -16
- floodmodeller_api/test/test_zz.py +143 -0
- floodmodeller_api/to_from_json.py +9 -9
- floodmodeller_api/tool.py +15 -11
- floodmodeller_api/toolbox/example_tool.py +5 -1
- floodmodeller_api/toolbox/model_build/add_siltation_definition.py +13 -9
- floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +500 -194
- floodmodeller_api/toolbox/model_build/structure_log_definition.py +5 -1
- floodmodeller_api/units/__init__.py +15 -0
- floodmodeller_api/units/_base.py +87 -20
- floodmodeller_api/units/_helpers.py +343 -0
- floodmodeller_api/units/boundaries.py +59 -71
- floodmodeller_api/units/comment.py +1 -1
- floodmodeller_api/units/conduits.py +57 -54
- floodmodeller_api/units/connectors.py +112 -0
- floodmodeller_api/units/controls.py +107 -0
- floodmodeller_api/units/conveyance.py +1 -1
- floodmodeller_api/units/iic.py +2 -9
- floodmodeller_api/units/losses.py +44 -45
- floodmodeller_api/units/sections.py +52 -51
- floodmodeller_api/units/structures.py +361 -531
- floodmodeller_api/units/units.py +27 -26
- floodmodeller_api/units/unsupported.py +5 -7
- floodmodeller_api/units/variables.py +2 -2
- floodmodeller_api/urban1d/_base.py +13 -17
- floodmodeller_api/urban1d/conduits.py +11 -21
- floodmodeller_api/urban1d/general_parameters.py +1 -1
- floodmodeller_api/urban1d/junctions.py +7 -11
- floodmodeller_api/urban1d/losses.py +13 -17
- floodmodeller_api/urban1d/outfalls.py +18 -22
- floodmodeller_api/urban1d/raingauges.py +5 -10
- floodmodeller_api/urban1d/subsections.py +5 -4
- floodmodeller_api/urban1d/xsections.py +14 -17
- floodmodeller_api/util.py +23 -6
- floodmodeller_api/validation/parameters.py +7 -3
- floodmodeller_api/validation/urban_parameters.py +1 -4
- floodmodeller_api/validation/validation.py +11 -5
- floodmodeller_api/version.py +1 -1
- floodmodeller_api/xml2d.py +27 -31
- floodmodeller_api/xml2d_template.py +1 -1
- floodmodeller_api/zz.py +539 -0
- {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/LICENSE.txt +1 -1
- {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/METADATA +30 -16
- {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/RECORD +116 -83
- {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/WHEEL +1 -1
- floodmodeller_api/test/test_zzn.py +0 -36
- floodmodeller_api/units/helpers.py +0 -123
- floodmodeller_api/zzn.py +0 -414
- /floodmodeller_api/test/test_data/{network_from_tabularCSV.csv → tabular_csv_outputs/network_zzn_max.csv} +0 -0
- {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/entry_points.txt +0 -0
- {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/top_level.txt +0 -0
floodmodeller_api/zzn.py
DELETED
@@ -1,414 +0,0 @@
-"""
-Flood Modeller Python API
-Copyright (C) 2024 Jacobs U.K. Limited
-
-This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
-as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
-
-If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
-address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
-"""
-
-from __future__ import annotations
-
-import ctypes as ct
-from pathlib import Path
-from typing import Any
-
-import numpy as np
-import pandas as pd
-
-from ._base import FMFile
-from .to_from_json import to_json
-from .util import handle_exception, is_windows
-
-
-class ZZN(FMFile):
-    """Reads and processes Flood Modeller 1D binary results format '.zzn'
-
-    Args:
-        zzn_filepath (str): Full filepath to model zzn file
-
-    Output:
-        Initiates 'ZZN' class object
-    """
-
-    _filetype: str = "ZZN"
-    _suffix: str = ".zzn"
-
-    @handle_exception(when="read")
-    def __init__(  # noqa: PLR0915
-        self,
-        zzn_filepath: str | Path | None = None,
-        from_json: bool = False,
-    ):
-        if from_json:
-            return
-        FMFile.__init__(self, zzn_filepath)
-
-        # Get zzn_dll path
-        lib = "zzn_read.dll" if is_windows() else "libzzn_read.so"
-        zzn_dll = Path(__file__).resolve().parent / "libs" / lib
-
-        # Catch LD_LIBRARY_PATH error for linux
-        try:
-            zzn_read = ct.CDLL(str(zzn_dll))
-        except OSError as e:
-            msg_1 = "libifport.so.5: cannot open shared object file: No such file or directory"
-            if msg_1 in str(e):
-                msg_2 = "Set LD_LIBRARY_PATH environment variable to be floodmodeller_api/lib"
-                raise OSError(msg_2) from e
-            raise
-
-        # Get zzl path
-        zzn = self._filepath
-        zzl = zzn.with_suffix(".zzl")
-        if not zzl.exists():
-            raise FileNotFoundError(
-                "Error: Could not find associated .ZZL file. Ensure that the zzn results have an associated zzl file with matching name.",
-            )
-
-        self.meta: dict[str, Any] = {}  # Dict object to hold all metadata
-        self.data = {}  # Dict object to hold all data
-
-        # PROCESS_ZZL
-        self.meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)
-        self.meta["zzn_name"] = ct.create_string_buffer(bytes(str(zzn), "utf-8"), 255)
-        self.meta["model_title"] = ct.create_string_buffer(b"", 128)
-        self.meta["nnodes"] = ct.c_int(0)
-        self.meta["label_length"] = ct.c_int(0)
-        self.meta["dt"] = ct.c_float(0.0)
-        self.meta["timestep0"] = ct.c_int(0)
-        self.meta["ltimestep"] = ct.c_int(0)
-        self.meta["save_int"] = ct.c_float(0.0)
-        self.meta["is_quality"] = ct.c_bool(False)
-        self.meta["nvars"] = ct.c_int(0)
-        self.meta["tzero"] = (ct.c_int * 5)()
-        self.meta["errstat"] = ct.c_int(0)
-        zzn_read.process_zzl(
-            ct.byref(self.meta["zzl_name"]),
-            ct.byref(self.meta["model_title"]),
-            ct.byref(self.meta["nnodes"]),
-            ct.byref(self.meta["label_length"]),
-            ct.byref(self.meta["dt"]),
-            ct.byref(self.meta["timestep0"]),
-            ct.byref(self.meta["ltimestep"]),
-            ct.byref(self.meta["save_int"]),
-            ct.byref(self.meta["is_quality"]),
-            ct.byref(self.meta["nvars"]),
-            ct.byref(self.meta["tzero"]),
-            ct.byref(self.meta["errstat"]),
-        )
-        # PROCESS_LABELS
-        self.meta["labels"] = (
-            ct.c_char * self.meta["label_length"].value * self.meta["nnodes"].value
-        )()
-        zzn_read.process_labels(
-            ct.byref(self.meta["zzl_name"]),
-            ct.byref(self.meta["nnodes"]),
-            ct.byref(self.meta["label_length"]),
-            ct.byref(self.meta["errstat"]),
-        )
-        for i in range(self.meta["nnodes"].value):
-            zzn_read.get_zz_label(
-                ct.byref(ct.c_int(i + 1)),
-                ct.byref(self.meta["labels"][i]),
-                ct.byref(self.meta["errstat"]),
-            )
-        # PREPROCESS_ZZN
-        last_hr = (
-            (self.meta["ltimestep"].value - self.meta["timestep0"].value)
-            * self.meta["dt"].value
-            / 3600
-        )
-        self.meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
-        self.meta["aitimestep"] = (ct.c_int * 2)(
-            self.meta["timestep0"].value,
-            self.meta["ltimestep"].value,
-        )
-        self.meta["isavint"] = (ct.c_int * 2)()
-        zzn_read.preprocess_zzn(
-            ct.byref(self.meta["output_hrs"]),
-            ct.byref(self.meta["aitimestep"]),
-            ct.byref(self.meta["dt"]),
-            ct.byref(self.meta["timestep0"]),
-            ct.byref(self.meta["ltimestep"]),
-            ct.byref(self.meta["save_int"]),
-            ct.byref(self.meta["isavint"]),
-        )
-        # PROCESS_ZZN
-        self.meta["node_ID"] = ct.c_int(-1)
-        self.meta["savint_skip"] = ct.c_int(1)
-        self.meta["savint_range"] = ct.c_int(
-            int(
-                (self.meta["isavint"][1] - self.meta["isavint"][0])
-                / self.meta["savint_skip"].value,
-            ),
-        )
-        nx = self.meta["nnodes"].value
-        ny = self.meta["nvars"].value
-        nz = self.meta["savint_range"].value + 1
-        self.data["all_results"] = (ct.c_float * nx * ny * nz)()
-        self.data["max_results"] = (ct.c_float * nx * ny)()
-        self.data["min_results"] = (ct.c_float * nx * ny)()
-        self.data["max_times"] = (ct.c_int * nx * ny)()
-        self.data["min_times"] = (ct.c_int * nx * ny)()
-        zzn_read.process_zzn(
-            ct.byref(self.meta["zzn_name"]),
-            ct.byref(self.meta["node_ID"]),
-            ct.byref(self.meta["nnodes"]),
-            ct.byref(self.meta["is_quality"]),
-            ct.byref(self.meta["nvars"]),
-            ct.byref(self.meta["savint_range"]),
-            ct.byref(self.meta["savint_skip"]),
-            ct.byref(self.data["all_results"]),
-            ct.byref(self.data["max_results"]),
-            ct.byref(self.data["min_results"]),
-            ct.byref(self.data["max_times"]),
-            ct.byref(self.data["min_times"]),
-            ct.byref(self.meta["errstat"]),
-            ct.byref(self.meta["isavint"]),
-        )
-
-        # Convert useful metadata from C types into python types
-
-        self.meta["dt"] = self.meta["dt"].value
-        self.meta["nnodes"] = self.meta["nnodes"].value
-        self.meta["save_int"] = self.meta["save_int"].value
-        self.meta["nvars"] = self.meta["nvars"].value
-        self.meta["savint_range"] = self.meta["savint_range"].value
-
-        self.meta["zzn_name"] = self.meta["zzn_name"].value.decode()
-        self.meta["labels"] = [label.value.decode().strip() for label in list(self.meta["labels"])]
-        self.meta["model_title"] = self.meta["model_title"].value.decode()
-
-    def to_dataframe(  # noqa: PLR0911
-        self,
-        result_type: str = "all",
-        variable: str = "all",
-        include_time: bool = False,
-        multilevel_header: bool = True,
-    ) -> pd.Series | pd.DataFrame:
-        """Loads zzn results to pandas dataframe object.
-
-        Args:
-            result_type (str, optional): {'all'} | 'max' | 'min'
-                Define whether to return all timesteps or just max/min results. Defaults to 'all'.
-            variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
-                Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
-            include_time (bool, optional):
-                Whether to include the time of max or min results. Defaults to False.
-            multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
-                headers with the variable as first level and node label as second header. If False, the column
-                names will be formatted "{node label}_{variable}". Defaults to True.
-
-        Returns:
-            pandas.DataFrame(): dataframe object of simulation results
-        """
-        nx = self.meta["nnodes"]
-        ny = self.meta["nvars"]
-        nz = self.meta["savint_range"] + 1
-        result_type = result_type.lower()
-
-        if result_type == "all":
-            arr = np.array(self.data["all_results"])
-            time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
-            vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
-            if multilevel_header:
-                col_names = [vars_list, self.meta["labels"]]
-                df = pd.DataFrame(
-                    arr.reshape(nz, nx * ny),
-                    index=time_index,
-                    columns=pd.MultiIndex.from_product(col_names),
-                )
-                df.index.name = "Time (hr)"
-                if variable != "all":
-                    return df[variable.capitalize()]
-
-            else:
-                col_names = [f"{node}_{var}" for var in vars_list for node in self.meta["labels"]]
-                df = pd.DataFrame(arr.reshape(nz, nx * ny), index=time_index, columns=col_names)
-                df.index.name = "Time (hr)"
-                if variable != "all":
-                    use_cols = [col for col in df.columns if col.endswith(variable.capitalize())]
-                    return df[use_cols]
-            return df
-
-        if result_type in ("max", "min"):
-            arr = np.array(self.data[f"{result_type}_results"]).transpose()
-            node_index = self.meta["labels"]
-            col_names = [
-                result_type.capitalize() + lbl
-                for lbl in [
-                    " Flow",
-                    " Stage",
-                    " Froude",
-                    " Velocity",
-                    " Mode",
-                    " State",
-                ]
-            ]
-            df = pd.DataFrame(arr, index=node_index, columns=col_names)
-            df.index.name = "Node Label"
-
-            if include_time:
-                times = np.array(self.data[f"{result_type}_times"]).transpose()
-                # transform timestep into hrs
-                times = ((times - self.meta["timestep0"]) * self.meta["dt"]) / 3600
-                time_col_names = [name + " Time(hrs)" for name in col_names]
-                time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
-                time_df.index.name = "Node Label"
-                df = pd.concat([df, time_df], axis=1)
-                new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
-                df = df[new_col_order]
-                if variable != "all":
-                    return df[
-                        [
-                            f"{result_type.capitalize()} {variable.capitalize()}",
-                            f"{result_type.capitalize()} {variable.capitalize()} Time(hrs)",
-                        ]
-                    ]
-                return df
-
-            if variable != "all":
-                return df[f"{result_type.capitalize()} {variable.capitalize()}"]
-            return df
-
-        raise ValueError(f'Result type: "{result_type}" not recognised')
-
-    def export_to_csv(
-        self,
-        save_location: str | Path = "default",
-        result_type: str = "all",
-        variable: str = "all",
-        include_time: bool = False,
-    ) -> None:
-        """Exports zzn results to CSV file.
-
-        Args:
-            save_location (str, optional): {default} | folder or file path
-                Full or relative path to folder or csv file to save output csv, if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file. Defaults to 'default'.
-            result_type (str, optional): {all} | max | min
-                Define whether to output all timesteps or just max/min results. Defaults to 'all'.
-            variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
-                Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
-            include_time (bool, optional):
-                Whether to include the time of max or min results. Defaults to False.
-
-        Raises:
-            Exception: Raised if result_type set to invalid option
-        """
-        if save_location == "default":
-            save_location = Path(self.meta["zzn_name"]).with_suffix(".csv")
-        else:
-            save_location = Path(save_location)
-            if not save_location.is_absolute():
-                # for if relative folder path given
-                save_location = Path(Path(self.meta["zzn_name"]).parent, save_location)
-
-        if save_location.suffix != ".csv":  # Assumed to be pointing to a folder
-            # Check if the folder exists, if not create it
-            if not save_location.exists():
-                Path.mkdir(save_location)
-            save_location = Path(
-                save_location,
-                Path(self.meta["zzn_name"]).with_suffix(".csv").name,
-            )
-
-        elif not save_location.parent.exists():
-            Path.mkdir(save_location.parent)
-
-        result_type = result_type.lower()
-
-        if result_type.lower() not in ["all", "max", "min"]:
-            raise Exception(
-                f" '{result_type}' is not a valid result type. Valid arguments are: 'all', 'max' or 'min' ",
-            )
-
-        df = self.to_dataframe(
-            result_type=result_type,
-            variable=variable,
-            include_time=include_time,
-        )
-        df.to_csv(save_location)
-        print(f"CSV saved to {save_location}")
-
-    def to_dict_of_dataframes(self, variable: str = "all") -> dict:
-        """Loads zzn results to a dictionary of pandas dataframe objects.
-
-        Args:
-            variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
-                Specify a single output variable (e.g 'flow' or 'stage') or any combination passed as comma separated
-                variable names. Defaults to 'all'.
-
-        Returns:
-            dict: dictionary of dataframe object of simulation results, keys corresponding to variables.
-        """
-        nx = self.meta["nnodes"]
-        ny = self.meta["nvars"]
-        nz = self.meta["savint_range"] + 1
-        output = {}
-
-        arr = np.array(self.data["all_results"])
-        time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
-
-        vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
-
-        col_names = self.meta["labels"]
-        temp_arr = np.reshape(arr, (nz, ny, nx))
-
-        for i, var in enumerate(vars_list):
-            output[var] = pd.DataFrame(temp_arr[:, i, :], index=time_index, columns=col_names)
-            output[var].index.name = "Time (hr)"
-
-        output["Time (hr)"] = time_index
-
-        if variable != "all":
-            input_vars = variable.split(",")
-            for i, var in enumerate(input_vars):
-                input_vars[i] = var.strip().capitalize()
-                if input_vars[i] not in vars_list:
-                    raise Exception(
-                        f" '{input_vars[i]}' is not a valid variable name. Valid arguments are: {vars_list} ",
-                    )
-
-            for var in vars_list:
-                if var not in input_vars:
-                    del output[var]
-        return output
-
-    def to_json(
-        self,
-        result_type: str = "all",
-        variable: str = "all",
-        include_time: bool = False,
-        multilevel_header: bool = True,
-    ) -> str:
-        """Loads zzn results to JSON object.
-
-        Args:
-            result_type (str, optional): {'all'} | 'max' | 'min'
-                Define whether to return all timesteps or just max/min results. Defaults to 'all'.
-            variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
-                Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
-            include_time (bool, optional):
-                Whether to include the time of max or min results. Defaults to False.
-            multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
-                headers with the variable as first level and node label as second header. If False, the column
-                names will be formatted "{node label}_{variable}". Defaults to True.
-
-        Returns:
-            str: A JSON string representing the ZZN results.
-        """
-        df = self.to_dataframe(result_type, variable, include_time, multilevel_header)
-        return to_json(df)
-
-    @classmethod
-    def from_json(cls, json_string: str = ""):
-        # Not possible
-        raise NotImplementedError("It is not possible to build a ZZN class instance from JSON")
/floodmodeller_api/test/test_data/{network_from_tabularCSV.csv → tabular_csv_outputs/network_zzn_max.csv}
RENAMED
File without changes

{floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/entry_points.txt
RENAMED
File without changes

{floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/top_level.txt
RENAMED
File without changes
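Note on the headline change: floodmodeller_api/zzn.py, the ctypes-based reader for Flood Modeller's binary .zzn results shown above, is deleted in 0.5.2, while a new floodmodeller_api/zz.py (+539) and test_zz.py (+143) appear in the file list, so the reader has most likely been superseded rather than dropped outright. For orientation only, below is a minimal usage sketch of the removed 0.5.0 class, based on the docstrings and signatures in the diff above; the results filepath is hypothetical and the snippet targets the old API, not the new zz.py module.

    from floodmodeller_api.zzn import ZZN  # import path as it existed in 0.5.0.post1

    # Hypothetical filepath; a matching .zzl file must sit alongside the .zzn
    results = ZZN("path/to/model_results.zzn")

    # Full time series for every node, with (variable, node label) multi-level columns
    df_all = results.to_dataframe(result_type="all")

    # Peak stage per node, plus the time (hrs) at which it occurred
    df_max_stage = results.to_dataframe(result_type="max", variable="stage", include_time=True)

    # Write the full results table to a CSV next to the .zzn file
    results.export_to_csv(save_location="default", result_type="all")

Code written against this 0.5.0 API will need updating when upgrading to 0.5.2, since the module above no longer ships in the wheel.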