floodmodeller-api 0.5.0.post1__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. floodmodeller_api/__init__.py +11 -1
  2. floodmodeller_api/_base.py +55 -36
  3. floodmodeller_api/backup.py +15 -12
  4. floodmodeller_api/dat.py +191 -121
  5. floodmodeller_api/diff.py +4 -4
  6. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +15 -14
  7. floodmodeller_api/ied.py +8 -10
  8. floodmodeller_api/ief.py +56 -42
  9. floodmodeller_api/ief_flags.py +1 -1
  10. floodmodeller_api/inp.py +7 -10
  11. floodmodeller_api/logs/lf.py +25 -26
  12. floodmodeller_api/logs/lf_helpers.py +20 -20
  13. floodmodeller_api/logs/lf_params.py +1 -5
  14. floodmodeller_api/mapping.py +11 -2
  15. floodmodeller_api/test/__init__.py +2 -2
  16. floodmodeller_api/test/conftest.py +2 -3
  17. floodmodeller_api/test/test_backup.py +2 -2
  18. floodmodeller_api/test/test_conveyance.py +13 -7
  19. floodmodeller_api/test/test_dat.py +168 -20
  20. floodmodeller_api/test/test_data/EX18_DAT_expected.json +164 -144
  21. floodmodeller_api/test/test_data/EX3_DAT_expected.json +6 -2
  22. floodmodeller_api/test/test_data/EX6_DAT_expected.json +12 -46
  23. floodmodeller_api/test/test_data/encoding_test_cp1252.dat +1081 -0
  24. floodmodeller_api/test/test_data/encoding_test_utf8.dat +1081 -0
  25. floodmodeller_api/test/test_data/integrated_bridge/AR_NoSP_NoBl_2O_NO_OneFRC.ied +33 -0
  26. floodmodeller_api/test/test_data/integrated_bridge/AR_vSP_25pc_1O.ied +32 -0
  27. floodmodeller_api/test/test_data/integrated_bridge/PL_vSP_25pc_1O.ied +34 -0
  28. floodmodeller_api/test/test_data/integrated_bridge/SBTwoFRCsStaggered.IED +32 -0
  29. floodmodeller_api/test/test_data/integrated_bridge/US_NoSP_NoBl_OR_RN.ied +28 -0
  30. floodmodeller_api/test/test_data/integrated_bridge/US_SP_NoBl_OR_frc_PT2-5_RN.ied +34 -0
  31. floodmodeller_api/test/test_data/integrated_bridge/US_fSP_NoBl_1O.ied +30 -0
  32. floodmodeller_api/test/test_data/integrated_bridge/US_nSP_NoBl_1O.ied +49 -0
  33. floodmodeller_api/test/test_data/integrated_bridge/US_vSP_NoBl_2O_Para.ied +35 -0
  34. floodmodeller_api/test/test_data/integrated_bridge.dat +40 -0
  35. floodmodeller_api/test/test_data/network.ied +2 -2
  36. floodmodeller_api/test/test_data/network_dat_expected.json +141 -243
  37. floodmodeller_api/test/test_data/network_ied_expected.json +2 -2
  38. floodmodeller_api/test/test_data/network_with_comments.ied +2 -2
  39. floodmodeller_api/test/test_data/structure_logs/EX17_expected.csv +4 -0
  40. floodmodeller_api/test/test_data/structure_logs/EX17_expected.json +69 -0
  41. floodmodeller_api/test/test_data/structure_logs/EX18_expected.csv +20 -0
  42. floodmodeller_api/test/test_data/structure_logs/EX18_expected.json +292 -0
  43. floodmodeller_api/test/test_data/structure_logs/EX6_expected.csv +4 -0
  44. floodmodeller_api/test/test_data/structure_logs/EX6_expected.json +35 -0
  45. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_flow.csv +182 -0
  46. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_fr.csv +182 -0
  47. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_mode.csv +182 -0
  48. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_stage.csv +182 -0
  49. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_state.csv +182 -0
  50. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_velocity.csv +182 -0
  51. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_h.csv +182 -0
  52. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_mode.csv +182 -0
  53. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_link_inflow.csv +182 -0
  54. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_max.csv +87 -0
  55. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_h.csv +182 -0
  56. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_mode.csv +182 -0
  57. floodmodeller_api/test/test_flowtimeprofile.py +2 -2
  58. floodmodeller_api/test/test_hydrology_plus_export.py +4 -2
  59. floodmodeller_api/test/test_ied.py +3 -3
  60. floodmodeller_api/test/test_ief.py +12 -4
  61. floodmodeller_api/test/test_inp.py +2 -2
  62. floodmodeller_api/test/test_integrated_bridge.py +159 -0
  63. floodmodeller_api/test/test_json.py +14 -13
  64. floodmodeller_api/test/test_logs_lf.py +50 -29
  65. floodmodeller_api/test/test_read_file.py +1 -0
  66. floodmodeller_api/test/test_river.py +12 -12
  67. floodmodeller_api/test/test_tool.py +8 -5
  68. floodmodeller_api/test/test_toolbox_structure_log.py +148 -158
  69. floodmodeller_api/test/test_xml2d.py +14 -16
  70. floodmodeller_api/test/test_zz.py +143 -0
  71. floodmodeller_api/to_from_json.py +9 -9
  72. floodmodeller_api/tool.py +15 -11
  73. floodmodeller_api/toolbox/example_tool.py +5 -1
  74. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +13 -9
  75. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +500 -194
  76. floodmodeller_api/toolbox/model_build/structure_log_definition.py +5 -1
  77. floodmodeller_api/units/__init__.py +15 -0
  78. floodmodeller_api/units/_base.py +87 -20
  79. floodmodeller_api/units/_helpers.py +343 -0
  80. floodmodeller_api/units/boundaries.py +59 -71
  81. floodmodeller_api/units/comment.py +1 -1
  82. floodmodeller_api/units/conduits.py +57 -54
  83. floodmodeller_api/units/connectors.py +112 -0
  84. floodmodeller_api/units/controls.py +107 -0
  85. floodmodeller_api/units/conveyance.py +1 -1
  86. floodmodeller_api/units/iic.py +2 -9
  87. floodmodeller_api/units/losses.py +44 -45
  88. floodmodeller_api/units/sections.py +52 -51
  89. floodmodeller_api/units/structures.py +361 -531
  90. floodmodeller_api/units/units.py +27 -26
  91. floodmodeller_api/units/unsupported.py +5 -7
  92. floodmodeller_api/units/variables.py +2 -2
  93. floodmodeller_api/urban1d/_base.py +13 -17
  94. floodmodeller_api/urban1d/conduits.py +11 -21
  95. floodmodeller_api/urban1d/general_parameters.py +1 -1
  96. floodmodeller_api/urban1d/junctions.py +7 -11
  97. floodmodeller_api/urban1d/losses.py +13 -17
  98. floodmodeller_api/urban1d/outfalls.py +18 -22
  99. floodmodeller_api/urban1d/raingauges.py +5 -10
  100. floodmodeller_api/urban1d/subsections.py +5 -4
  101. floodmodeller_api/urban1d/xsections.py +14 -17
  102. floodmodeller_api/util.py +23 -6
  103. floodmodeller_api/validation/parameters.py +7 -3
  104. floodmodeller_api/validation/urban_parameters.py +1 -4
  105. floodmodeller_api/validation/validation.py +11 -5
  106. floodmodeller_api/version.py +1 -1
  107. floodmodeller_api/xml2d.py +27 -31
  108. floodmodeller_api/xml2d_template.py +1 -1
  109. floodmodeller_api/zz.py +539 -0
  110. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/LICENSE.txt +1 -1
  111. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/METADATA +30 -16
  112. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/RECORD +116 -83
  113. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/WHEEL +1 -1
  114. floodmodeller_api/test/test_zzn.py +0 -36
  115. floodmodeller_api/units/helpers.py +0 -123
  116. floodmodeller_api/zzn.py +0 -414
  117. /floodmodeller_api/test/test_data/{network_from_tabularCSV.csv → tabular_csv_outputs/network_zzn_max.csv} +0 -0
  118. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/entry_points.txt +0 -0
  119. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/top_level.txt +0 -0
floodmodeller_api/zz.py
@@ -0,0 +1,539 @@
+ """
+ Flood Modeller Python API
+ Copyright (C) 2025 Jacobs U.K. Limited
+
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
+
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
+ """
+
+ from __future__ import annotations
+
+ import ctypes as ct
+ import logging
+ from pathlib import Path
+ from types import MappingProxyType
+ from typing import TYPE_CHECKING, Any
+
+ import numpy as np
+ import pandas as pd
+
+ from ._base import FMFile
+ from .to_from_json import to_json
+ from .util import get_associated_file, handle_exception, is_windows
+
+ if TYPE_CHECKING:
+     from collections.abc import Mapping
+
+
+ def get_reader() -> ct.CDLL:
+     # Get zzn_dll path
+     lib = "zzn_read.dll" if is_windows() else "libzzn_read.so"
+     zzn_dll = Path(__file__).resolve().parent / "libs" / lib
+
+     # Catch LD_LIBRARY_PATH error for linux
+     try:
+         return ct.CDLL(str(zzn_dll))
+     except OSError as e:
+         msg_1 = "libifport.so.5: cannot open shared object file: No such file or directory"
+         if msg_1 in str(e):
+             msg_2 = "Set LD_LIBRARY_PATH environment variable to be floodmodeller_api/lib"
+             raise OSError(msg_2) from e
+         raise
+
+
+ def check_errstat(routine: str, errstat: int) -> None:
+     if errstat != 0:
+         msg = (
+             f"Errstat from {routine} routine is {errstat}."
+             " See zzread_errorlog.txt for more information."
+         )
+         raise RuntimeError(msg)
+
+
+ def run_routines(
+     reader: ct.CDLL,
+     zzl: Path,
+     zzn_or_zzx: Path,
+     is_quality: bool,
+ ) -> tuple[dict[str, Any], dict[str, Any]]:
+     data: dict[str, Any] = {}
+     meta: dict[str, Any] = {}
+
+     zzx_or_zzn_name = "zzx_name" if is_quality else "zzn_name"
+     zzx_or_zzl_name = "zzx_name" if is_quality else "zzl_name"
+     meta[zzx_or_zzn_name] = ct.create_string_buffer(bytes(str(zzn_or_zzx), "utf-8"), 255)
+     meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)
+
+     # process zzl
+     meta["model_title"] = ct.create_string_buffer(b"", 128)
+     meta["nnodes"] = ct.c_int(0)
+     meta["label_length"] = ct.c_int(0)
+     meta["dt"] = ct.c_float(0.0)
+     meta["timestep0"] = ct.c_int(0)
+     meta["ltimestep"] = ct.c_int(0)
+     meta["save_int"] = ct.c_float(0.0)
+     meta["is_quality"] = ct.c_bool(is_quality)
+     meta["nvars"] = ct.c_int(0)
+     meta["tzero"] = (ct.c_int * 5)()
+     meta["errstat"] = ct.c_int(0)
+
+     reader.process_zzl(
+         ct.byref(meta[zzx_or_zzl_name]),
+         ct.byref(meta["model_title"]),
+         ct.byref(meta["nnodes"]),
+         ct.byref(meta["label_length"]),
+         ct.byref(meta["dt"]),
+         ct.byref(meta["timestep0"]),
+         ct.byref(meta["ltimestep"]),
+         ct.byref(meta["save_int"]),
+         ct.byref(meta["is_quality"]),
+         ct.byref(meta["nvars"]),
+         ct.byref(meta["tzero"]),
+         ct.byref(meta["errstat"]),
+     )
+     check_errstat("process_zzl", meta["errstat"].value)
+
+     # process labels
+     if meta["label_length"].value == 0:  # means that we are probably running quality data
+         meta["label_length"].value = 12  # 12 is the max expected from dll
+
+     reader.process_labels(
+         ct.byref(meta["zzl_name"]),
+         ct.byref(meta["nnodes"]),
+         ct.byref(meta["label_length"]),
+         ct.byref(meta["errstat"]),
+     )
+     check_errstat("process_labels", meta["errstat"].value)
+
+     # get zz labels
+     meta["labels"] = (ct.c_char * meta["label_length"].value * meta["nnodes"].value)()
+     for i in range(meta["nnodes"].value):
+         reader.get_zz_label(
+             ct.byref(ct.c_int(i + 1)),
+             ct.byref(meta["labels"][i]),
+             ct.byref(meta["errstat"]),
+         )
+         check_errstat("get_zz_label", meta["errstat"].value)
+
+     # preprocess zzn
+     last_hr = (meta["ltimestep"].value - meta["timestep0"].value) * meta["dt"].value / 3600
+     meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
+     meta["aitimestep"] = (ct.c_int * 2)(meta["timestep0"].value, meta["ltimestep"].value)
+     meta["isavint"] = (ct.c_int * 2)()
+     reader.preprocess_zzn(
+         ct.byref(meta["output_hrs"]),
+         ct.byref(meta["aitimestep"]),
+         ct.byref(meta["dt"]),
+         ct.byref(meta["timestep0"]),
+         ct.byref(meta["ltimestep"]),
+         ct.byref(meta["save_int"]),
+         ct.byref(meta["isavint"]),
+     )
+
+     # process vars
+     reader.process_vars(
+         ct.byref(meta[zzx_or_zzl_name]),
+         ct.byref(meta["nvars"]),
+         ct.byref(meta["is_quality"]),
+         ct.byref(meta["errstat"]),
+     )
+     check_errstat("process_vars", meta["errstat"].value)
+
+     # get var names
+     meta["variables"] = (ct.c_char * 32 * meta["nvars"].value)()
+     for i in range(meta["nvars"].value):
+         reader.get_zz_variable_name(
+             ct.byref(ct.c_int(i + 1)),
+             ct.byref(meta["variables"][i]),
+             ct.byref(meta["errstat"]),
+         )
+         check_errstat("get_zz_variable_name", meta["errstat"].value)
+
+     # process zzn
+     meta["node_ID"] = ct.c_int(-1)
+     meta["savint_skip"] = ct.c_int(1)
+     meta["savint_range"] = ct.c_int(
+         int((meta["isavint"][1] - meta["isavint"][0]) / meta["savint_skip"].value),
+     )
+     nx = meta["nnodes"].value
+     ny = meta["nvars"].value
+     nz = meta["savint_range"].value + 1
+     data["all_results"] = (ct.c_float * nx * ny * nz)()
+     data["max_results"] = (ct.c_float * nx * ny)()
+     data["min_results"] = (ct.c_float * nx * ny)()
+     data["max_times"] = (ct.c_int * nx * ny)()
+     data["min_times"] = (ct.c_int * nx * ny)()
+     reader.process_zzn(
+         ct.byref(meta[zzx_or_zzn_name]),
+         ct.byref(meta["node_ID"]),
+         ct.byref(meta["nnodes"]),
+         ct.byref(meta["is_quality"]),
+         ct.byref(meta["nvars"]),
+         ct.byref(meta["savint_range"]),
+         ct.byref(meta["savint_skip"]),
+         ct.byref(data["all_results"]),
+         ct.byref(data["max_results"]),
+         ct.byref(data["min_results"]),
+         ct.byref(data["max_times"]),
+         ct.byref(data["min_times"]),
+         ct.byref(meta["errstat"]),
+         ct.byref(meta["isavint"]),
+     )
+     check_errstat("process_zzn", meta["errstat"].value)
+
+     return data, meta
+
+
+ def convert_data(data: dict[str, Any]) -> None:
+     for key, value in data.items():
+         data[key] = np.array(value)
+
+
+ def convert_meta(meta: dict[str, Any]) -> None:
+     to_get_value = (
+         "dt",
+         "errstat",
+         "is_quality",
+         "label_length",
+         "ltimestep",
+         "nnodes",
+         "node_ID",
+         "nvars",
+         "save_int",
+         "savint_range",
+         "savint_skip",
+         "timestep0",
+     )
+     for key in to_get_value:
+         meta[key] = meta[key].value
+
+     to_get_list = ("aitimestep", "isavint", "output_hrs", "tzero", "variables")
+     for key in to_get_list:
+         meta[key] = list(meta[key])
+
+     to_get_decoded_value = ("model_title", "zzl_name", "zzx_name", "zzn_name")
+     for key in to_get_decoded_value:
+         if key not in meta:
+             continue
+         meta[key] = meta[key].value.decode()
+
+     to_get_decoded_value_list = ("labels", "variables")
+     for key in to_get_decoded_value_list:
+         meta[key] = [x.value.decode().strip() for x in list(meta[key])]
+
+
+ class _ZZ(FMFile):
+     """Base class for ZZN and ZZX."""
+
+     @handle_exception(when="read")
+     def __init__(
+         self,
+         zzn_filepath: str | Path | None = None,
+         from_json: bool = False,
+     ):
+         if from_json:
+             return
+
+         FMFile.__init__(self, zzn_filepath)
+
+         reader = get_reader()
+         zzl = get_associated_file(self._filepath, ".zzl")
+
+         is_quality = self._suffix == ".zzx"
+
+         self._data, self._meta = run_routines(reader, zzl, self._filepath, is_quality)
+         convert_data(self._data)
+         convert_meta(self._meta)
+
+         self._nx = self._meta["nnodes"]
+         self._ny = self._meta["nvars"]
+         self._nz = self._meta["savint_range"] + 1
+         self._variables = (
+             self._meta["variables"]
+             if is_quality
+             else ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
+         )
+         self._index_name = "Label" if is_quality else "Node Label"
+
+     @property
+     def meta(self) -> Mapping[str, Any]:
+         return MappingProxyType(self._meta)  # because dictionaries are mutable
+
+     def _get_all(self, variable: str, multilevel_header: bool) -> pd.DataFrame:
+         is_all = variable == "all"
+
+         variable_display_name = variable.capitalize().replace("fp", "FP")
+
+         arr = self._data["all_results"]
+         time_index = np.linspace(self._meta["output_hrs"][0], self._meta["output_hrs"][1], self._nz)
+
+         if multilevel_header:
+             result = pd.DataFrame(
+                 arr.reshape(self._nz, self._nx * self._ny),
+                 index=time_index,
+                 columns=pd.MultiIndex.from_product([self._variables, self._meta["labels"]]),
+             )
+             result.index.name = "Time (hr)"
+             return result if is_all else result[variable_display_name]  # type: ignore
+             # ignored because it always returns a dataframe as it's a multilevel header
+
+         result = pd.DataFrame(
+             arr.reshape(self._nz, self._nx * self._ny),
+             index=time_index,
+             columns=[f"{node}_{var}" for var in self._variables for node in self._meta["labels"]],
+         )
+         result.index.name = "Time (hr)"
+         return (
+             result
+             if is_all
+             else result[[x for x in result.columns if x.endswith(variable_display_name)]]
+         )
+
+     def _get_extremes(
+         self,
+         variable: str,
+         result_type: str,
+         include_time: bool,
+     ) -> pd.Series | pd.DataFrame:
+         is_all = variable == "all"
+
+         result_type_display_name = result_type.capitalize()
+         variable_display_name = variable.capitalize().replace("fp", "FP")
+
+         combination = f"{result_type_display_name} {variable_display_name}"
+
+         arr = self._data[f"{result_type}_results"].transpose()
+         node_index = self._meta["labels"]
+         col_names = [f"{result_type_display_name} {x}" for x in self._variables]
+         result = pd.DataFrame(arr, index=node_index, columns=col_names)
+         result.index.name = self._index_name
+
+         if not include_time:
+             # df[combination] is the only time we get a series in _ZZ.get_dataframe()
+             return result if is_all else result[combination]
+
+         times = self._data[f"{result_type}_times"].transpose()
+         times = np.linspace(self._meta["output_hrs"][0], self._meta["output_hrs"][1], self._nz)[
+             times - 1
+         ]
+         time_col_names = [name + " Time(hrs)" for name in col_names]
+         time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
+         time_df.index.name = self._index_name
+         result = pd.concat([result, time_df], axis=1)
+         new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
+         result = result[new_col_order]
+         return result if is_all else result[[combination, f"{combination} Time(hrs)"]]
+
+     def to_dataframe(
+         self,
+         result_type: str = "all",
+         variable: str = "all",
+         include_time: bool = False,
+         multilevel_header: bool = True,
+     ) -> pd.Series | pd.DataFrame:
+         result_type = result_type.lower()
+
+         if result_type == "all":
+             return self._get_all(variable, multilevel_header)
+
+         if result_type in {"max", "min"}:
+             return self._get_extremes(variable, result_type, include_time)
+
+         msg = f"Result type '{result_type}' not recognised"
+         raise ValueError(msg)
+
+     def export_to_csv(
+         self,
+         save_location: str | Path = "default",
+         result_type: str = "all",
+         variable: str = "all",
+         include_time: bool = False,
+     ) -> None:
+         if save_location == "default":
+             save_location = self._filepath.with_suffix(".csv")
+         else:
+             save_location = (
+                 Path(save_location)
+                 if Path(save_location).is_absolute()
+                 else self._filepath.parent / save_location
+             )
+
+         if save_location.suffix != ".csv":  # Assumed to be pointing to a folder
+             save_location = save_location / self._filepath.with_suffix(".csv").name
+
+         save_location.parent.mkdir(parents=True, exist_ok=True)
+
+         zz_df = self.to_dataframe(
+             result_type=result_type,
+             variable=variable,
+             include_time=include_time,
+         )
+         zz_df.to_csv(save_location)
+         logging.info("CSV saved to %s", save_location)
+
+     def to_json(
+         self,
+         result_type: str = "all",
+         variable: str = "all",
+         include_time: bool = False,
+         multilevel_header: bool = True,
+     ) -> str:
+         zz_df = self.to_dataframe(result_type, variable, include_time, multilevel_header)
+         return to_json(zz_df)
+
+     @classmethod
+     def from_json(cls, json_string: str = ""):
+         msg = f"It is not possible to build a {cls._filetype} class instance from JSON"
+         raise NotImplementedError(msg)
+
+
+ class ZZN(_ZZ):
+     """Reads and processes Flood Modeller 1D binary results format '.zzn'
+
+     Args:
+         zzn_filepath (str): Full filepath to model zzn file
+
+     Output:
+         Initiates 'ZZN' class object
+     """
+
+     _filetype: str = "ZZN"
+     _suffix: str = ".zzn"
+
+     def to_dataframe(self, *args, **kwargs) -> pd.Series | pd.DataFrame:
+         """Loads ZZN results to pandas dataframe object.
+
+         Args:
+             result_type (str, optional): {'all'} | 'max' | 'min'
+                 Define whether to return all timesteps or just max/min results. Defaults to 'all'.
+             variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
+                 Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
+             include_time (bool, optional):
+                 Whether to include the time of max or min results. Defaults to False.
+             multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
+                 headers with the variable as first level and node label as second header. If False, the column
+                 names will be formatted "{node label}_{variable}". Defaults to True.
+
+         Returns:
+             pandas.DataFrame(): dataframe object of simulation results
+         """
+         return super().to_dataframe(*args, **kwargs)
+
+     def export_to_csv(self, *args, **kwargs) -> None:
+         """Exports ZZN results to CSV file.
+
+         Args:
+             save_location (str, optional): {default} | folder or file path
+                 Full or relative path to folder or csv file to save output csv,
+                 if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file.
+                 Defaults to 'default'.
+             result_type (str, optional): {all} | max | min
+                 Define whether to output all timesteps or just max/min results. Defaults to 'all'.
+             variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
+                 Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
+             include_time (bool, optional):
+                 Whether to include the time of max or min results. Defaults to False.
+
+         Raises:
+             Exception: Raised if result_type set to invalid option
+         """
+         return super().export_to_csv(*args, **kwargs)
+
+     def to_json(self, *args, **kwargs) -> str:
+         """Loads ZZN results to JSON object.
+
+         Args:
+             result_type (str, optional): {'all'} | 'max' | 'min'
+                 Define whether to return all timesteps or just max/min results. Defaults to 'all'.
+             variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
+                 Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
+             include_time (bool, optional):
+                 Whether to include the time of max or min results. Defaults to False.
+             multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
+                 headers with the variable as first level and node label as second header. If False, the column
+                 names will be formatted "{node label}_{variable}". Defaults to True.
+
+         Returns:
+             str: A JSON string representing the results.
+         """
+         return super().to_json(*args, **kwargs)
+
+
+ class ZZX(_ZZ):
+     """Reads and processes Flood Modeller 1D binary results format '.zzx'
+
+     Args:
+         zzx_filepath (str): Full filepath to model zzx file
+
+     Output:
+         Initiates 'ZZX' class object
+     """
+
+     _filetype: str = "ZZX"
+     _suffix: str = ".zzx"
+
+     def to_dataframe(self, *args, **kwargs) -> pd.Series | pd.DataFrame:
+         """Loads ZZX results to pandas dataframe object.
+
+         Args:
+             result_type (str, optional): {'all'} | 'max' | 'min'
+                 Define whether to return all timesteps or just max/min results. Defaults to 'all'.
+             variable (str, optional): {'all'} | 'Left FP h' | 'Link inflow' | 'Right FP h' | 'Right FP mode' | 'Left FP mode'
+                 Specify a single output variable (e.g 'link inflow'). Defaults to 'all'.
+             include_time (bool, optional):
+                 Whether to include the time of max or min results. Defaults to False.
+             multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
+                 headers with the variable as first level and node label as second header. If False, the column
+                 names will be formatted "{node label}_{variable}". Defaults to True.
+
+         Returns:
+             pandas.DataFrame(): dataframe object of simulation results
+         """
+         return super().to_dataframe(*args, **kwargs)
+
+     def export_to_csv(self, *args, **kwargs) -> None:
+         """Exports ZZX results to CSV file.
+
+         Args:
+             save_location (str, optional): {default} | folder or file path
+                 Full or relative path to folder or csv file to save output csv,
+                 if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file.
+                 Defaults to 'default'.
+             result_type (str, optional): {all} | max | min
+                 Define whether to output all timesteps or just max/min results. Defaults to 'all'.
+             variable (str, optional): {'all'} | 'Left FP h' | 'Link inflow' | 'Right FP h' | 'Right FP mode' | 'Left FP mode'
+                 Specify a single output variable (e.g 'link inflow'). Defaults to 'all'.
+             include_time (bool, optional):
+                 Whether to include the time of max or min results. Defaults to False.
+
+         Raises:
+             Exception: Raised if result_type set to invalid option
+         """
+         return super().export_to_csv(*args, **kwargs)
+
+     def to_json(self, *args, **kwargs) -> str:
+         """Loads ZZX results to JSON object.
+
+         Args:
+             result_type (str, optional): {'all'} | 'max' | 'min'
+                 Define whether to return all timesteps or just max/min results. Defaults to 'all'.
+             variable (str, optional): {'all'} | 'Left FP h' | 'Link inflow' | 'Right FP h' | 'Right FP mode' | 'Left FP mode'
+                 Specify a single output variable (e.g 'link inflow'). Defaults to 'all'.
+             include_time (bool, optional):
+                 Whether to include the time of max or min results. Defaults to False.
+             multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
+                 headers with the variable as first level and node label as second header. If False, the column
+                 names will be formatted "{node label}_{variable}". Defaults to True.
+
+         Returns:
+             str: A JSON string representing the results.
+         """
+         return super().to_json(*args, **kwargs)
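Not part of the diff itself, but as an illustration of the interface documented in the docstrings above, a minimal usage sketch might look like the following. The file path is hypothetical, and the companion `.zzl` file is assumed to sit alongside the `.zzn`; `ZZX` is used the same way for `.zzx` (quality) results.

```python
from floodmodeller_api.zz import ZZN  # module added in this release; ZZX works the same way

# Hypothetical path. On Linux, LD_LIBRARY_PATH may need to point at the bundled
# libs folder so that libzzn_read.so can load (see get_reader above).
zzn = ZZN("path/to/model.zzn")

all_results = zzn.to_dataframe()  # every timestep, multi-level columns (variable, node label)
max_stage = zzn.to_dataframe(result_type="max", variable="stage", include_time=True)

zzn.export_to_csv(save_location="max_results.csv", result_type="max")  # relative paths resolve beside the .zzn
flow_json = zzn.to_json(variable="flow", multilevel_header=False)
```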
{floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/LICENSE.txt
@@ -1,5 +1,5 @@
  Flood Modeller Python API
- Copyright (C) 2024 Jacobs U.K. Limited
+ Copyright (C) 2025 Jacobs U.K. Limited
 
  This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
  as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
{floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/METADATA
@@ -1,26 +1,37 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.2
  Name: floodmodeller_api
- Version: 0.5.0.post1
+ Version: 0.5.2
  Summary: Extends the functionality of Flood Modeller to python users
  Author: Jacobs
  Author-email: joe.pierce@jacobs.com
- License: GNU General Public License V3. Copyright (C) 2024 Jacobs U.K. Limited.
+ License: GNU General Public License V3. Copyright (C) 2025 Jacobs U.K. Limited.
  Project-URL: API Documentation, https://api.floodmodeller.com/api/
  Project-URL: Flood Modeller Homepage, https://www.floodmodeller.com/
  Project-URL: Contact & Support, https://www.floodmodeller.com/contact
  Description-Content-Type: text/markdown
  License-File: LICENSE.txt
- Requires-Dist: pandas <3,>1
- Requires-Dist: lxml ==5.*
- Requires-Dist: tqdm ==4.*
- Requires-Dist: pytest <8,>4
- Requires-Dist: pytest-mock ==3.*
- Requires-Dist: shapely ==2.*
- Requires-Dist: scipy ==1.*
- Requires-Dist: freezegun ==1.*
- Requires-Dist: requests >2.23
-
- ![FM Logo](https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/_static/flood-modeller-logo-hero-image.png)
+ Requires-Dist: pandas<3,>1
+ Requires-Dist: lxml==5.*
+ Requires-Dist: tqdm==4.*
+ Requires-Dist: pytest<8,>4
+ Requires-Dist: pytest-mock==3.*
+ Requires-Dist: shapely==2.*
+ Requires-Dist: scipy==1.*
+ Requires-Dist: freezegun==1.*
+ Requires-Dist: requests>2.23
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: license
+ Dynamic: project-url
+ Dynamic: requires-dist
+ Dynamic: summary
+
+ <picture>
+ <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/_static/flood-modeller-logo-hero-image-dark.png">
+ <img alt="FM Logo" src="https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/_static/flood-modeller-logo-hero-image.png">
+ </picture>
 
 
  [![PyPI Latest Release](https://img.shields.io/pypi/v/floodmodeller-api.svg)](https://pypi.org/project/floodmodeller-api/)
@@ -37,7 +48,10 @@ Requires-Dist: requests >2.23
 
  This python package is designed to be used in conjunction with your installation of Flood Modeller to provide users with a set of tools to extend the capabilities of Flood Modeller and build into automated workflows.
 
- ![API Overview](https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/getting_started/api_overview_small.png)
+ <picture>
+ <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/getting_started/api_overview_dark.png">
+ <img alt="API Overview" src="https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/getting_started/api_overview.png">
+ </picture>
 
  ## Installation
  You can install the floodmodeller_api package from PyPI with the following command:
@@ -46,7 +60,7 @@ You can install the floodmodeller_api package from PyPI with the following comma
  pip install floodmodeller-api
  ```
 
- Python 3.9 or greater is required.
+ Python 3.10 or greater is required.
 
  Once you have installed floodmodeller_api to your python environment, you can import the package with: