floodmodeller-api 0.5.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80) hide show
  1. floodmodeller_api/__init__.py +1 -1
  2. floodmodeller_api/_base.py +26 -16
  3. floodmodeller_api/backup.py +3 -2
  4. floodmodeller_api/dat.py +29 -30
  5. floodmodeller_api/diff.py +3 -3
  6. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +14 -13
  7. floodmodeller_api/ied.py +6 -6
  8. floodmodeller_api/ief.py +27 -25
  9. floodmodeller_api/inp.py +3 -4
  10. floodmodeller_api/logs/lf.py +9 -16
  11. floodmodeller_api/logs/lf_helpers.py +18 -18
  12. floodmodeller_api/mapping.py +2 -0
  13. floodmodeller_api/test/__init__.py +2 -2
  14. floodmodeller_api/test/conftest.py +2 -3
  15. floodmodeller_api/test/test_backup.py +2 -2
  16. floodmodeller_api/test/test_conveyance.py +4 -3
  17. floodmodeller_api/test/test_dat.py +2 -2
  18. floodmodeller_api/test/test_data/structure_logs/EX17_expected.csv +4 -0
  19. floodmodeller_api/test/test_data/structure_logs/EX17_expected.json +69 -0
  20. floodmodeller_api/test/test_data/structure_logs/EX18_expected.csv +20 -0
  21. floodmodeller_api/test/test_data/structure_logs/EX18_expected.json +292 -0
  22. floodmodeller_api/test/test_data/structure_logs/EX6_expected.csv +4 -0
  23. floodmodeller_api/test/test_data/structure_logs/EX6_expected.json +35 -0
  24. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_flow.csv +182 -0
  25. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_fr.csv +182 -0
  26. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_mode.csv +182 -0
  27. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_stage.csv +182 -0
  28. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_state.csv +182 -0
  29. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_velocity.csv +182 -0
  30. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_h.csv +182 -0
  31. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_mode.csv +182 -0
  32. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_link_inflow.csv +182 -0
  33. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_max.csv +87 -0
  34. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_h.csv +182 -0
  35. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_mode.csv +182 -0
  36. floodmodeller_api/test/test_flowtimeprofile.py +2 -2
  37. floodmodeller_api/test/test_hydrology_plus_export.py +4 -2
  38. floodmodeller_api/test/test_ied.py +2 -2
  39. floodmodeller_api/test/test_ief.py +2 -2
  40. floodmodeller_api/test/test_inp.py +2 -2
  41. floodmodeller_api/test/test_json.py +5 -10
  42. floodmodeller_api/test/test_logs_lf.py +6 -6
  43. floodmodeller_api/test/test_read_file.py +1 -0
  44. floodmodeller_api/test/test_river.py +79 -2
  45. floodmodeller_api/test/test_tool.py +8 -5
  46. floodmodeller_api/test/test_toolbox_structure_log.py +149 -158
  47. floodmodeller_api/test/test_xml2d.py +9 -11
  48. floodmodeller_api/test/test_zz.py +143 -0
  49. floodmodeller_api/to_from_json.py +8 -8
  50. floodmodeller_api/tool.py +12 -6
  51. floodmodeller_api/toolbox/example_tool.py +5 -1
  52. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +12 -8
  53. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +498 -196
  54. floodmodeller_api/toolbox/model_build/structure_log_definition.py +5 -1
  55. floodmodeller_api/units/_base.py +14 -10
  56. floodmodeller_api/units/conveyance.py +10 -8
  57. floodmodeller_api/units/helpers.py +1 -3
  58. floodmodeller_api/units/losses.py +2 -3
  59. floodmodeller_api/units/sections.py +15 -11
  60. floodmodeller_api/units/structures.py +9 -9
  61. floodmodeller_api/units/units.py +2 -0
  62. floodmodeller_api/urban1d/_base.py +6 -9
  63. floodmodeller_api/urban1d/outfalls.py +2 -1
  64. floodmodeller_api/urban1d/raingauges.py +2 -1
  65. floodmodeller_api/urban1d/subsections.py +2 -0
  66. floodmodeller_api/urban1d/xsections.py +3 -2
  67. floodmodeller_api/util.py +16 -2
  68. floodmodeller_api/validation/validation.py +2 -1
  69. floodmodeller_api/version.py +1 -1
  70. floodmodeller_api/xml2d.py +18 -20
  71. floodmodeller_api/zz.py +538 -0
  72. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/METADATA +20 -14
  73. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/RECORD +78 -60
  74. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/WHEEL +1 -1
  75. floodmodeller_api/test/test_zzn.py +0 -36
  76. floodmodeller_api/zzn.py +0 -414
  77. /floodmodeller_api/test/test_data/{network_from_tabularCSV.csv → tabular_csv_outputs/network_zzn_max.csv} +0 -0
  78. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/LICENSE.txt +0 -0
  79. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/entry_points.txt +0 -0
  80. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,538 @@
1
+ """
2
+ Flood Modeller Python API
3
+ Copyright (C) 2024 Jacobs U.K. Limited
4
+
5
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
+
8
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
+
11
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
+
13
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import ctypes as ct
20
+ from pathlib import Path
21
+ from types import MappingProxyType
22
+ from typing import TYPE_CHECKING, Any
23
+
24
+ import numpy as np
25
+ import pandas as pd
26
+
27
+ from ._base import FMFile
28
+ from .to_from_json import to_json
29
+ from .util import get_associated_file, handle_exception, is_windows
30
+
31
+ if TYPE_CHECKING:
32
+ from collections.abc import Mapping
33
+
34
+
35
def get_reader() -> ct.CDLL:
    """Load the shared library used to read zzn/zzx binary results.

    Returns:
        ct.CDLL: handle to the platform-specific zzn reader library.

    Raises:
        OSError: if the library cannot be loaded. When the known Linux
            ``libifport.so.5`` failure is detected, a clearer message
            pointing at LD_LIBRARY_PATH is raised instead.
    """
    # Resolve the bundled library next to this module.
    lib_name = "zzn_read.dll" if is_windows() else "libzzn_read.so"
    lib_path = Path(__file__).resolve().parent / "libs" / lib_name

    try:
        return ct.CDLL(str(lib_path))
    except OSError as err:
        known_fragment = "libifport.so.5: cannot open shared object file: No such file or directory"
        if known_fragment not in str(err):
            raise
        # Linux-specific: the Intel Fortran runtime could not be found.
        hint = "Set LD_LIBRARY_PATH environment variable to be floodmodeller_api/lib"
        raise OSError(hint) from err
49
+
50
+
51
def check_errstat(routine: str, errstat: int) -> None:
    """Raise a RuntimeError if a DLL routine reported a non-zero error status.

    Args:
        routine: name of the DLL routine that produced *errstat*.
        errstat: status code returned by the routine; 0 means success.

    Raises:
        RuntimeError: when *errstat* is non-zero.
    """
    if errstat == 0:
        return
    msg = (
        f"Errstat from {routine} routine is {errstat}."
        " See zzread_errorlog.txt for more information."
    )
    raise RuntimeError(msg)
58
+
59
+
60
def run_routines(
    reader: ct.CDLL,
    zzl: Path,
    zzn_or_zzx: Path,
    is_quality: bool,
) -> tuple[dict[str, Any], dict[str, Any]]:
    """Drive the zzn-reader DLL to load results and metadata.

    Args:
        reader: handle to the zzn reader library (see ``get_reader``).
        zzl: path to the .zzl label/metadata file.
        zzn_or_zzx: path to the .zzn (hydraulic) or .zzx (quality) results file.
        is_quality: True when reading a .zzx file.

    Returns:
        (data, meta): ``data`` holds ctypes result arrays
        (all/max/min results and max/min times); ``meta`` holds ctypes
        scalars, buffers, and label/variable arrays. Both are converted to
        plain Python/numpy values by ``convert_data``/``convert_meta``.

    Raises:
        RuntimeError: if any DLL routine reports a non-zero errstat.
    """
    data: dict[str, Any] = {}
    meta: dict[str, Any] = {}

    # The DLL expects differently-named file buffers for quality runs.
    zzx_or_zzn_name = "zzx_name" if is_quality else "zzn_name"
    zzx_or_zzl_name = "zzx_name" if is_quality else "zzl_name"
    meta[zzx_or_zzn_name] = ct.create_string_buffer(bytes(str(zzn_or_zzx), "utf-8"), 255)
    meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)

    # process zzl: out-parameters the DLL fills in with run metadata.
    meta["model_title"] = ct.create_string_buffer(b"", 128)
    meta["nnodes"] = ct.c_int(0)
    meta["label_length"] = ct.c_int(0)
    meta["dt"] = ct.c_float(0.0)
    meta["timestep0"] = ct.c_int(0)
    meta["ltimestep"] = ct.c_int(0)
    meta["save_int"] = ct.c_float(0.0)
    meta["is_quality"] = ct.c_bool(is_quality)
    meta["nvars"] = ct.c_int(0)
    meta["tzero"] = (ct.c_int * 5)()
    meta["errstat"] = ct.c_int(0)

    reader.process_zzl(
        ct.byref(meta[zzx_or_zzl_name]),
        ct.byref(meta["model_title"]),
        ct.byref(meta["nnodes"]),
        ct.byref(meta["label_length"]),
        ct.byref(meta["dt"]),
        ct.byref(meta["timestep0"]),
        ct.byref(meta["ltimestep"]),
        ct.byref(meta["save_int"]),
        ct.byref(meta["is_quality"]),
        ct.byref(meta["nvars"]),
        ct.byref(meta["tzero"]),
        ct.byref(meta["errstat"]),
    )
    check_errstat("process_zzl", meta["errstat"].value)

    # process labels
    if meta["label_length"].value == 0:  # means that we are probably running quality data
        meta["label_length"].value = 12  # 12 is the max expected from dll

    reader.process_labels(
        ct.byref(meta["zzl_name"]),
        ct.byref(meta["nnodes"]),
        ct.byref(meta["label_length"]),
        ct.byref(meta["errstat"]),
    )
    check_errstat("process_labels", meta["errstat"].value)

    # get zz labels: one fixed-width char buffer per node (DLL uses 1-based indices).
    meta["labels"] = (ct.c_char * meta["label_length"].value * meta["nnodes"].value)()
    for i in range(meta["nnodes"].value):
        reader.get_zz_label(
            ct.byref(ct.c_int(i + 1)),
            ct.byref(meta["labels"][i]),
            ct.byref(meta["errstat"]),
        )
        check_errstat("get_zz_label", meta["errstat"].value)

    # preprocess zzn: derive output window in hours from timestep range and dt.
    last_hr = (meta["ltimestep"].value - meta["timestep0"].value) * meta["dt"].value / 3600
    meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
    meta["aitimestep"] = (ct.c_int * 2)(meta["timestep0"].value, meta["ltimestep"].value)
    meta["isavint"] = (ct.c_int * 2)()
    reader.preprocess_zzn(
        ct.byref(meta["output_hrs"]),
        ct.byref(meta["aitimestep"]),
        ct.byref(meta["dt"]),
        ct.byref(meta["timestep0"]),
        ct.byref(meta["ltimestep"]),
        ct.byref(meta["save_int"]),
        ct.byref(meta["isavint"]),
    )

    # process vars
    reader.process_vars(
        ct.byref(meta[zzx_or_zzl_name]),
        ct.byref(meta["nvars"]),
        ct.byref(meta["is_quality"]),
        ct.byref(meta["errstat"]),
    )
    check_errstat("process_vars", meta["errstat"].value)

    # get var names: 32-char buffer per variable (DLL uses 1-based indices).
    meta["variables"] = (ct.c_char * 32 * meta["nvars"].value)()
    for i in range(meta["nvars"].value):
        reader.get_zz_variable_name(
            ct.byref(ct.c_int(i + 1)),
            ct.byref(meta["variables"][i]),
            ct.byref(meta["errstat"]),
        )
        check_errstat("get_zz_variable_name", meta["errstat"].value)

    # process zzn: allocate result arrays sized (timesteps x vars x nodes) and fill them.
    meta["node_ID"] = ct.c_int(-1)  # -1: read all nodes, not a single one
    meta["savint_skip"] = ct.c_int(1)
    meta["savint_range"] = ct.c_int(
        int((meta["isavint"][1] - meta["isavint"][0]) / meta["savint_skip"].value),
    )
    nx = meta["nnodes"].value
    ny = meta["nvars"].value
    nz = meta["savint_range"].value + 1
    data["all_results"] = (ct.c_float * nx * ny * nz)()
    data["max_results"] = (ct.c_float * nx * ny)()
    data["min_results"] = (ct.c_float * nx * ny)()
    data["max_times"] = (ct.c_int * nx * ny)()
    data["min_times"] = (ct.c_int * nx * ny)()
    reader.process_zzn(
        ct.byref(meta[zzx_or_zzn_name]),
        ct.byref(meta["node_ID"]),
        ct.byref(meta["nnodes"]),
        ct.byref(meta["is_quality"]),
        ct.byref(meta["nvars"]),
        ct.byref(meta["savint_range"]),
        ct.byref(meta["savint_skip"]),
        ct.byref(data["all_results"]),
        ct.byref(data["max_results"]),
        ct.byref(data["min_results"]),
        ct.byref(data["max_times"]),
        ct.byref(data["min_times"]),
        ct.byref(meta["errstat"]),
        ct.byref(meta["isavint"]),
    )
    check_errstat("process_zzn", meta["errstat"].value)

    return data, meta
192
+
193
+
194
def convert_data(data: dict[str, Any]) -> None:
    """Replace each ctypes results array in *data* with a numpy array, in place."""
    for key, raw in data.items():
        data[key] = np.array(raw)
197
+
198
+
199
def convert_meta(meta: dict[str, Any]) -> None:
    """Unwrap the ctypes objects in *meta* into plain Python values, in place.

    Scalars become int/float/bool, fixed-size arrays become lists, byte
    buffers are decoded to str, and label/variable buffers become lists of
    stripped strings.
    """
    scalar_keys = (
        "dt",
        "errstat",
        "is_quality",
        "label_length",
        "ltimestep",
        "nnodes",
        "node_ID",
        "nvars",
        "save_int",
        "savint_range",
        "savint_skip",
        "timestep0",
    )
    for key in scalar_keys:
        meta[key] = meta[key].value

    array_keys = ("aitimestep", "isavint", "output_hrs", "tzero", "variables")
    for key in array_keys:
        meta[key] = list(meta[key])

    # Only one of zzx_name/zzn_name is present, depending on the file type.
    buffer_keys = ("model_title", "zzl_name", "zzx_name", "zzn_name")
    for key in buffer_keys:
        if key in meta:
            meta[key] = meta[key].value.decode()

    decoded_list_keys = ("labels", "variables")
    for key in decoded_list_keys:
        meta[key] = [entry.value.decode().strip() for entry in list(meta[key])]
230
+
231
+
232
class _ZZ(FMFile):
    """Base class for ZZN and ZZX.

    Loads a binary results file via the reader DLL on construction and
    exposes the results as pandas objects (``to_dataframe``), CSV
    (``export_to_csv``), and JSON (``to_json``).
    """

    @handle_exception(when="read")
    def __init__(
        self,
        zzn_filepath: str | Path | None = None,
        from_json: bool = False,
    ):
        # from_json short-circuits file reading; see from_json() below,
        # which currently always raises for this class hierarchy.
        if from_json:
            return

        FMFile.__init__(self, zzn_filepath)

        reader = get_reader()
        # The .zzl file alongside the results file holds labels/metadata.
        zzl = get_associated_file(self._filepath, ".zzl")

        # _suffix is defined by the subclasses (".zzn" or ".zzx").
        is_quality = self._suffix == ".zzx"

        self._data, self._meta = run_routines(reader, zzl, self._filepath, is_quality)
        convert_data(self._data)
        convert_meta(self._meta)

        # Cached dimensions: nodes (x), variables (y), saved timesteps (z).
        self._nx = self._meta["nnodes"]
        self._ny = self._meta["nvars"]
        self._nz = self._meta["savint_range"] + 1
        # Hydraulic (.zzn) files always carry this fixed variable set.
        self._variables = (
            self._meta["variables"]
            if is_quality
            else ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
        )
        self._index_name = "Label" if is_quality else "Node Label"

    @property
    def meta(self) -> Mapping[str, Any]:
        """Read-only view of the file metadata."""
        return MappingProxyType(self._meta)  # because dictionaries are mutable

    def _get_all(self, variable: str, multilevel_header: bool) -> pd.DataFrame:
        """Return the full timeseries, optionally filtered to one variable."""
        is_all = variable == "all"

        # e.g. "left fp h" -> "Left FP h", matching column naming below.
        variable_display_name = variable.capitalize().replace("fp", "FP")

        arr = self._data["all_results"]
        time_index = np.linspace(self._meta["output_hrs"][0], self._meta["output_hrs"][1], self._nz)

        if multilevel_header:
            result = pd.DataFrame(
                arr.reshape(self._nz, self._nx * self._ny),
                index=time_index,
                columns=pd.MultiIndex.from_product([self._variables, self._meta["labels"]]),
            )
            result.index.name = "Time (hr)"
            return result if is_all else result[variable_display_name]  # type: ignore
            # ignored because it always returns a dataframe as it's a multilevel header

        result = pd.DataFrame(
            arr.reshape(self._nz, self._nx * self._ny),
            index=time_index,
            columns=[f"{node}_{var}" for var in self._variables for node in self._meta["labels"]],
        )
        result.index.name = "Time (hr)"
        return (
            result
            if is_all
            else result[[x for x in result.columns if x.endswith(variable_display_name)]]
        )

    def _get_extremes(
        self,
        variable: str,
        result_type: str,
        include_time: bool,
    ) -> pd.Series | pd.DataFrame:
        """Return max/min results per node, optionally with the time they occurred."""
        is_all = variable == "all"

        result_type_display_name = result_type.capitalize()
        variable_display_name = variable.capitalize().replace("fp", "FP")

        # Column name of the requested combination, e.g. "Max Flow".
        combination = f"{result_type_display_name} {variable_display_name}"

        arr = self._data[f"{result_type}_results"].transpose()
        node_index = self._meta["labels"]
        col_names = [f"{result_type_display_name} {x}" for x in self._variables]
        result = pd.DataFrame(arr, index=node_index, columns=col_names)
        result.index.name = self._index_name

        if not include_time:
            # df[combination] is the only time we get a series in _ZZ.get_dataframe()
            return result if is_all else result[combination]

        # Map 1-based timestep indices to hours via the output time axis.
        times = self._data[f"{result_type}_times"].transpose()
        times = np.linspace(self._meta["output_hrs"][0], self._meta["output_hrs"][1], self._nz)[
            times - 1
        ]
        time_col_names = [name + " Time(hrs)" for name in col_names]
        time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
        time_df.index.name = self._index_name
        result = pd.concat([result, time_df], axis=1)
        # Interleave value and time columns: "Max X", "Max X Time(hrs)", ...
        new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
        result = result[new_col_order]
        return result if is_all else result[[combination, f"{combination} Time(hrs)"]]

    def to_dataframe(
        self,
        result_type: str = "all",
        variable: str = "all",
        include_time: bool = False,
        multilevel_header: bool = True,
    ) -> pd.Series | pd.DataFrame:
        """Return results as a pandas object; see subclass docstrings for details.

        Raises:
            ValueError: if *result_type* is not 'all', 'max', or 'min'.
        """
        result_type = result_type.lower()

        if result_type == "all":
            return self._get_all(variable, multilevel_header)

        if result_type in ("max", "min"):
            return self._get_extremes(variable, result_type, include_time)

        msg = f'Result type: "{result_type}" not recognised'
        raise ValueError(msg)

    def export_to_csv(
        self,
        save_location: str | Path = "default",
        result_type: str = "all",
        variable: str = "all",
        include_time: bool = False,
    ) -> None:
        """Write results to CSV; see subclass docstrings for argument details."""
        if save_location == "default":
            save_location = self._filepath.with_suffix(".csv")
        else:
            # Relative paths are resolved against the results file's folder.
            save_location = (
                Path(save_location)
                if Path(save_location).is_absolute()
                else self._filepath.parent / save_location
            )

        if save_location.suffix != ".csv":  # Assumed to be pointing to a folder
            save_location = save_location / self._filepath.with_suffix(".csv").name

        save_location.parent.mkdir(parents=True, exist_ok=True)

        zz_df = self.to_dataframe(
            result_type=result_type,
            variable=variable,
            include_time=include_time,
        )
        zz_df.to_csv(save_location)
        print(f"CSV saved to {save_location}")

    def to_json(
        self,
        result_type: str = "all",
        variable: str = "all",
        include_time: bool = False,
        multilevel_header: bool = True,
    ) -> str:
        """Return results serialised as a JSON string."""
        zz_df = self.to_dataframe(result_type, variable, include_time, multilevel_header)
        return to_json(zz_df)

    @classmethod
    def from_json(cls, json_string: str = ""):
        """Not supported for results files; always raises NotImplementedError."""
        msg = f"It is not possible to build a {cls._filetype} class instance from JSON"
        raise NotImplementedError(msg)
395
+
396
+
397
class ZZN(_ZZ):
    """Reads and processes Flood Modeller 1D binary results format '.zzn'

    Args:
        zzn_filepath (str): Full filepath to model zzn file

    Output:
        Initiates 'ZZN' class object
    """

    # Identifiers used by the shared _ZZ/FMFile machinery: _filetype appears
    # in error messages (see _ZZ.from_json); _suffix selects hydraulic mode.
    _filetype: str = "ZZN"
    _suffix: str = ".zzn"

    def to_dataframe(self, *args, **kwargs) -> pd.Series | pd.DataFrame:
        """Loads ZZN results to pandas dataframe object.

        Args:
            result_type (str, optional): {'all'} | 'max' | 'min'
                Define whether to return all timesteps or just max/min results. Defaults to 'all'.
            variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
                Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
            include_time (bool, optional):
                Whether to include the time of max or min results. Defaults to False.
            multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
                headers with the variable as first level and node label as second header. If False, the column
                names will be formatted "{node label}_{variable}". Defaults to True.

        Returns:
            pandas.DataFrame(): dataframe object of simulation results
        """
        return super().to_dataframe(*args, **kwargs)

    def export_to_csv(self, *args, **kwargs) -> None:
        """Exports ZZN results to CSV file.

        Args:
            save_location (str, optional): {default} | folder or file path
                Full or relative path to folder or csv file to save output csv,
                if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file.
                Defaults to 'default'.
            result_type (str, optional): {all} | max | min
                Define whether to output all timesteps or just max/min results. Defaults to 'all'.
            variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
                Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
            include_time (bool, optional):
                Whether to include the time of max or min results. Defaults to False.

        Raises:
            Exception: Raised if result_type set to invalid option
        """
        return super().export_to_csv(*args, **kwargs)

    def to_json(self, *args, **kwargs) -> str:
        """Loads ZZN results to JSON object.

        Args:
            result_type (str, optional): {'all'} | 'max' | 'min'
                Define whether to return all timesteps or just max/min results. Defaults to 'all'.
            variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
                Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
            include_time (bool, optional):
                Whether to include the time of max or min results. Defaults to False.
            multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
                headers with the variable as first level and node label as second header. If False, the column
                names will be formatted "{node label}_{variable}". Defaults to True.

        Returns:
            str: A JSON string representing the results.
        """
        return super().to_json(*args, **kwargs)
467
+
468
+
469
class ZZX(_ZZ):
    """Reads and processes Flood Modeller 1D binary results format '.zzx'

    Args:
        zzx_filepath (str): Full filepath to model zzx file

    Output:
        Initiates 'ZZX' class object
    """

    # Identifiers used by the shared _ZZ/FMFile machinery: _filetype appears
    # in error messages (see _ZZ.from_json); _suffix selects quality mode.
    _filetype: str = "ZZX"
    _suffix: str = ".zzx"

    def to_dataframe(self, *args, **kwargs) -> pd.Series | pd.DataFrame:
        """Loads ZZX results to pandas dataframe object.

        Args:
            result_type (str, optional): {'all'} | 'max' | 'min'
                Define whether to return all timesteps or just max/min results. Defaults to 'all'.
            variable (str, optional): {'all'} | 'Left FP h' | 'Link inflow' | 'Right FP h' | 'Right FP mode' | 'Left FP mode'
                Specify a single output variable (e.g 'link inflow'). Defaults to 'all'.
            include_time (bool, optional):
                Whether to include the time of max or min results. Defaults to False.
            multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
                headers with the variable as first level and node label as second header. If False, the column
                names will be formatted "{node label}_{variable}". Defaults to True.

        Returns:
            pandas.DataFrame(): dataframe object of simulation results
        """
        return super().to_dataframe(*args, **kwargs)

    def export_to_csv(self, *args, **kwargs) -> None:
        """Exports ZZX results to CSV file.

        Args:
            save_location (str, optional): {default} | folder or file path
                Full or relative path to folder or csv file to save output csv,
                if no argument given or if set to 'default' then CSV will be saved in same location as ZZX file.
                Defaults to 'default'.
            result_type (str, optional): {all} | max | min
                Define whether to output all timesteps or just max/min results. Defaults to 'all'.
            variable (str, optional): {'all'} | 'Left FP h' | 'Link inflow' | 'Right FP h' | 'Right FP mode' | 'Left FP mode'
                Specify a single output variable (e.g 'link inflow'). Defaults to 'all'.
            include_time (bool, optional):
                Whether to include the time of max or min results. Defaults to False.

        Raises:
            Exception: Raised if result_type set to invalid option
        """
        return super().export_to_csv(*args, **kwargs)

    def to_json(self, *args, **kwargs) -> str:
        """Loads ZZX results to JSON object.

        Args:
            result_type (str, optional): {'all'} | 'max' | 'min'
                Define whether to return all timesteps or just max/min results. Defaults to 'all'.
            variable (str, optional): {'all'} | 'Left FP h' | 'Link inflow' | 'Right FP h' | 'Right FP mode' | 'Left FP mode'
                Specify a single output variable (e.g 'link inflow'). Defaults to 'all'.
            include_time (bool, optional):
                Whether to include the time of max or min results. Defaults to False.
            multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
                headers with the variable as first level and node label as second header. If False, the column
                names will be formatted "{node label}_{variable}". Defaults to True.

        Returns:
            str: A JSON string representing the results.
        """
        return super().to_json(*args, **kwargs)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: floodmodeller_api
3
- Version: 0.5.0
3
+ Version: 0.5.1
4
4
  Summary: Extends the functionality of Flood Modeller to python users
5
5
  Author: Jacobs
6
6
  Author-email: joe.pierce@jacobs.com
@@ -10,17 +10,20 @@ Project-URL: Flood Modeller Homepage, https://www.floodmodeller.com/
10
10
  Project-URL: Contact & Support, https://www.floodmodeller.com/contact
11
11
  Description-Content-Type: text/markdown
12
12
  License-File: LICENSE.txt
13
- Requires-Dist: pandas <3,>1
14
- Requires-Dist: lxml ==5.*
15
- Requires-Dist: tqdm ==4.*
16
- Requires-Dist: pytest <8,>4
17
- Requires-Dist: pytest-mock ==3.*
18
- Requires-Dist: shapely ==2.*
19
- Requires-Dist: scipy ==1.*
20
- Requires-Dist: freezegun ==1.*
21
- Requires-Dist: requests >2.23
22
-
23
- ![FM Logo](https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/_static/flood-modeller-logo-hero-image.png)
13
+ Requires-Dist: pandas<3,>1
14
+ Requires-Dist: lxml==5.*
15
+ Requires-Dist: tqdm==4.*
16
+ Requires-Dist: pytest<8,>4
17
+ Requires-Dist: pytest-mock==3.*
18
+ Requires-Dist: shapely==2.*
19
+ Requires-Dist: scipy==1.*
20
+ Requires-Dist: freezegun==1.*
21
+ Requires-Dist: requests>2.23
22
+
23
+ <picture>
24
+ <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/_static/flood-modeller-logo-hero-image-dark.png">
25
+ <img alt="FM Logo" src="https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/_static/flood-modeller-logo-hero-image.png">
26
+ </picture>
24
27
 
25
28
 
26
29
  [![PyPI Latest Release](https://img.shields.io/pypi/v/floodmodeller-api.svg)](https://pypi.org/project/floodmodeller-api/)
@@ -37,7 +40,10 @@ Requires-Dist: requests >2.23
37
40
 
38
41
  This python package is designed to be used in conjunction with your installation of Flood Modeller to provide users with a set of tools to extend the capabilities of Flood Modeller and build into automated workflows.
39
42
 
40
- ![API Overview](https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/getting_started/api_overview_small.png)
43
+ <picture>
44
+ <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/getting_started/api_overview_dark.png">
45
+ <img alt="API Overview" src="https://raw.githubusercontent.com/People-Places-Solutions/floodmodeller-api/main/docs/source/getting_started/api_overview.png">
46
+ </picture>
41
47
 
42
48
  ## Installation
43
49
  You can install the floodmodeller_api package from PyPI with the following command:
@@ -46,7 +52,7 @@ You can install the floodmodeller_api package from PyPI with the following comma
46
52
  pip install floodmodeller-api
47
53
  ```
48
54
 
49
- Python 3.9 or greater is required.
55
+ Python 3.10 or greater is required.
50
56
 
51
57
  Once you have installed floodmodeller_api to your python environment, you can import the package with:
52
58