floodmodeller-api 0.4.2.post1__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178) hide show
  1. floodmodeller_api/__init__.py +8 -9
  2. floodmodeller_api/_base.py +169 -176
  3. floodmodeller_api/backup.py +273 -273
  4. floodmodeller_api/dat.py +889 -831
  5. floodmodeller_api/diff.py +136 -119
  6. floodmodeller_api/ied.py +302 -306
  7. floodmodeller_api/ief.py +553 -637
  8. floodmodeller_api/ief_flags.py +253 -253
  9. floodmodeller_api/inp.py +260 -266
  10. floodmodeller_api/libs/libifcoremd.dll +0 -0
  11. floodmodeller_api/libs/libifcoremt.so.5 +0 -0
  12. floodmodeller_api/libs/libifport.so.5 +0 -0
  13. floodmodeller_api/{libmmd.dll → libs/libimf.so} +0 -0
  14. floodmodeller_api/libs/libintlc.so.5 +0 -0
  15. floodmodeller_api/libs/libmmd.dll +0 -0
  16. floodmodeller_api/libs/libsvml.so +0 -0
  17. floodmodeller_api/libs/libzzn_read.so +0 -0
  18. floodmodeller_api/libs/zzn_read.dll +0 -0
  19. floodmodeller_api/logs/__init__.py +2 -2
  20. floodmodeller_api/logs/lf.py +364 -312
  21. floodmodeller_api/logs/lf_helpers.py +354 -352
  22. floodmodeller_api/logs/lf_params.py +643 -529
  23. floodmodeller_api/mapping.py +84 -0
  24. floodmodeller_api/test/__init__.py +4 -4
  25. floodmodeller_api/test/conftest.py +16 -8
  26. floodmodeller_api/test/test_backup.py +117 -117
  27. floodmodeller_api/test/test_conveyance.py +107 -0
  28. floodmodeller_api/test/test_dat.py +222 -92
  29. floodmodeller_api/test/test_data/All Units 4_6.DAT +1081 -1081
  30. floodmodeller_api/test/test_data/All Units 4_6.feb +1081 -1081
  31. floodmodeller_api/test/test_data/BRIDGE.DAT +926 -926
  32. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.dat +36 -36
  33. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.feb +36 -36
  34. floodmodeller_api/test/test_data/DamBreakADI.xml +52 -52
  35. floodmodeller_api/test/test_data/DamBreakFAST.xml +58 -58
  36. floodmodeller_api/test/test_data/DamBreakFAST_dy.xml +53 -53
  37. floodmodeller_api/test/test_data/DamBreakTVD.xml +55 -55
  38. floodmodeller_api/test/test_data/DefenceBreach.xml +53 -53
  39. floodmodeller_api/test/test_data/DefenceBreachFAST.xml +60 -60
  40. floodmodeller_api/test/test_data/DefenceBreachFAST_dy.xml +55 -55
  41. floodmodeller_api/test/test_data/Domain1+2_QH.xml +76 -76
  42. floodmodeller_api/test/test_data/Domain1_H.xml +41 -41
  43. floodmodeller_api/test/test_data/Domain1_Q.xml +41 -41
  44. floodmodeller_api/test/test_data/Domain1_Q_FAST.xml +48 -48
  45. floodmodeller_api/test/test_data/Domain1_Q_FAST_dy.xml +48 -48
  46. floodmodeller_api/test/test_data/Domain1_Q_xml_expected.json +263 -0
  47. floodmodeller_api/test/test_data/Domain1_W.xml +41 -41
  48. floodmodeller_api/test/test_data/EX1.DAT +321 -321
  49. floodmodeller_api/test/test_data/EX1.ext +107 -107
  50. floodmodeller_api/test/test_data/EX1.feb +320 -320
  51. floodmodeller_api/test/test_data/EX1.gxy +107 -107
  52. floodmodeller_api/test/test_data/EX17.DAT +421 -422
  53. floodmodeller_api/test/test_data/EX17.ext +213 -213
  54. floodmodeller_api/test/test_data/EX17.feb +422 -422
  55. floodmodeller_api/test/test_data/EX18.DAT +375 -375
  56. floodmodeller_api/test/test_data/EX18_DAT_expected.json +3876 -0
  57. floodmodeller_api/test/test_data/EX2.DAT +302 -302
  58. floodmodeller_api/test/test_data/EX3.DAT +926 -926
  59. floodmodeller_api/test/test_data/EX3_DAT_expected.json +16235 -0
  60. floodmodeller_api/test/test_data/EX3_IEF_expected.json +61 -0
  61. floodmodeller_api/test/test_data/EX6.DAT +2084 -2084
  62. floodmodeller_api/test/test_data/EX6.ext +532 -532
  63. floodmodeller_api/test/test_data/EX6.feb +2084 -2084
  64. floodmodeller_api/test/test_data/EX6_DAT_expected.json +31647 -0
  65. floodmodeller_api/test/test_data/Event Data Example.DAT +336 -336
  66. floodmodeller_api/test/test_data/Event Data Example.ext +107 -107
  67. floodmodeller_api/test/test_data/Event Data Example.feb +336 -336
  68. floodmodeller_api/test/test_data/Linked1D2D.xml +52 -52
  69. floodmodeller_api/test/test_data/Linked1D2DFAST.xml +53 -53
  70. floodmodeller_api/test/test_data/Linked1D2DFAST_dy.xml +48 -48
  71. floodmodeller_api/test/test_data/Linked1D2D_xml_expected.json +313 -0
  72. floodmodeller_api/test/test_data/blockage.dat +50 -50
  73. floodmodeller_api/test/test_data/blockage.ext +45 -45
  74. floodmodeller_api/test/test_data/blockage.feb +9 -9
  75. floodmodeller_api/test/test_data/blockage.gxy +71 -71
  76. floodmodeller_api/test/test_data/conveyance_test.dat +165 -0
  77. floodmodeller_api/test/test_data/conveyance_test.feb +116 -0
  78. floodmodeller_api/test/test_data/conveyance_test.gxy +85 -0
  79. floodmodeller_api/test/test_data/defaultUnits.dat +127 -127
  80. floodmodeller_api/test/test_data/defaultUnits.ext +45 -45
  81. floodmodeller_api/test/test_data/defaultUnits.feb +9 -9
  82. floodmodeller_api/test/test_data/defaultUnits.fmpx +58 -58
  83. floodmodeller_api/test/test_data/defaultUnits.gxy +85 -85
  84. floodmodeller_api/test/test_data/ex3.ief +20 -20
  85. floodmodeller_api/test/test_data/ex3.lf1 +2800 -2800
  86. floodmodeller_api/test/test_data/ex4.DAT +1374 -1374
  87. floodmodeller_api/test/test_data/ex4_changed.DAT +1374 -1374
  88. floodmodeller_api/test/test_data/example1.inp +329 -329
  89. floodmodeller_api/test/test_data/example2.inp +158 -158
  90. floodmodeller_api/test/test_data/example3.inp +297 -297
  91. floodmodeller_api/test/test_data/example4.inp +388 -388
  92. floodmodeller_api/test/test_data/example5.inp +147 -147
  93. floodmodeller_api/test/test_data/example6.inp +154 -154
  94. floodmodeller_api/test/test_data/expected_conveyance.csv +60 -0
  95. floodmodeller_api/test/test_data/jump.dat +176 -176
  96. floodmodeller_api/test/test_data/network.dat +1374 -1374
  97. floodmodeller_api/test/test_data/network.ext +45 -45
  98. floodmodeller_api/test/test_data/network.exy +1 -1
  99. floodmodeller_api/test/test_data/network.feb +45 -45
  100. floodmodeller_api/test/test_data/network.ied +45 -45
  101. floodmodeller_api/test/test_data/network.ief +20 -20
  102. floodmodeller_api/test/test_data/network.inp +147 -147
  103. floodmodeller_api/test/test_data/network.pxy +57 -57
  104. floodmodeller_api/test/test_data/network.zzd +122 -122
  105. floodmodeller_api/test/test_data/network_dat_expected.json +21837 -0
  106. floodmodeller_api/test/test_data/network_from_tabularCSV.csv +87 -87
  107. floodmodeller_api/test/test_data/network_ied_expected.json +287 -0
  108. floodmodeller_api/test/test_data/rnweir.dat +9 -9
  109. floodmodeller_api/test/test_data/rnweir.ext +45 -45
  110. floodmodeller_api/test/test_data/rnweir.feb +9 -9
  111. floodmodeller_api/test/test_data/rnweir.gxy +45 -45
  112. floodmodeller_api/test/test_data/rnweir_default.dat +74 -74
  113. floodmodeller_api/test/test_data/rnweir_default.ext +45 -45
  114. floodmodeller_api/test/test_data/rnweir_default.feb +9 -9
  115. floodmodeller_api/test/test_data/rnweir_default.fmpx +58 -58
  116. floodmodeller_api/test/test_data/rnweir_default.gxy +53 -53
  117. floodmodeller_api/test/test_data/unit checks.dat +16 -16
  118. floodmodeller_api/test/test_ied.py +29 -29
  119. floodmodeller_api/test/test_ief.py +136 -24
  120. floodmodeller_api/test/test_inp.py +47 -48
  121. floodmodeller_api/test/test_json.py +114 -0
  122. floodmodeller_api/test/test_logs_lf.py +102 -51
  123. floodmodeller_api/test/test_tool.py +165 -152
  124. floodmodeller_api/test/test_toolbox_structure_log.py +234 -239
  125. floodmodeller_api/test/test_xml2d.py +151 -156
  126. floodmodeller_api/test/test_zzn.py +36 -34
  127. floodmodeller_api/to_from_json.py +230 -0
  128. floodmodeller_api/tool.py +332 -329
  129. floodmodeller_api/toolbox/__init__.py +5 -5
  130. floodmodeller_api/toolbox/example_tool.py +45 -45
  131. floodmodeller_api/toolbox/model_build/__init__.py +2 -2
  132. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +100 -98
  133. floodmodeller_api/toolbox/model_build/structure_log/__init__.py +1 -1
  134. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +287 -289
  135. floodmodeller_api/toolbox/model_build/structure_log_definition.py +76 -76
  136. floodmodeller_api/units/__init__.py +10 -10
  137. floodmodeller_api/units/_base.py +214 -212
  138. floodmodeller_api/units/boundaries.py +467 -467
  139. floodmodeller_api/units/comment.py +52 -55
  140. floodmodeller_api/units/conduits.py +382 -402
  141. floodmodeller_api/units/conveyance.py +301 -0
  142. floodmodeller_api/units/helpers.py +123 -131
  143. floodmodeller_api/units/iic.py +107 -101
  144. floodmodeller_api/units/losses.py +305 -306
  145. floodmodeller_api/units/sections.py +465 -446
  146. floodmodeller_api/units/structures.py +1690 -1683
  147. floodmodeller_api/units/units.py +93 -104
  148. floodmodeller_api/units/unsupported.py +44 -44
  149. floodmodeller_api/units/variables.py +87 -89
  150. floodmodeller_api/urban1d/__init__.py +11 -11
  151. floodmodeller_api/urban1d/_base.py +188 -179
  152. floodmodeller_api/urban1d/conduits.py +93 -85
  153. floodmodeller_api/urban1d/general_parameters.py +58 -58
  154. floodmodeller_api/urban1d/junctions.py +81 -79
  155. floodmodeller_api/urban1d/losses.py +81 -74
  156. floodmodeller_api/urban1d/outfalls.py +114 -110
  157. floodmodeller_api/urban1d/raingauges.py +111 -111
  158. floodmodeller_api/urban1d/subsections.py +92 -98
  159. floodmodeller_api/urban1d/xsections.py +147 -144
  160. floodmodeller_api/util.py +119 -21
  161. floodmodeller_api/validation/parameters.py +660 -660
  162. floodmodeller_api/validation/urban_parameters.py +388 -404
  163. floodmodeller_api/validation/validation.py +110 -108
  164. floodmodeller_api/version.py +1 -1
  165. floodmodeller_api/xml2d.py +632 -673
  166. floodmodeller_api/xml2d_template.py +37 -37
  167. floodmodeller_api/zzn.py +414 -363
  168. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/LICENSE.txt +13 -13
  169. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/METADATA +85 -82
  170. floodmodeller_api-0.4.4.dist-info/RECORD +185 -0
  171. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/WHEEL +1 -1
  172. floodmodeller_api/libifcoremd.dll +0 -0
  173. floodmodeller_api/test/test_data/EX3.bmp +0 -0
  174. floodmodeller_api/test/test_data/test_output.csv +0 -87
  175. floodmodeller_api/zzn_read.dll +0 -0
  176. floodmodeller_api-0.4.2.post1.dist-info/RECORD +0 -164
  177. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/entry_points.txt +0 -0
  178. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/top_level.txt +0 -0
floodmodeller_api/zzn.py CHANGED
@@ -1,363 +1,414 @@
1
- """
2
- Flood Modeller Python API
3
- Copyright (C) 2023 Jacobs U.K. Limited
4
-
5
- This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
- as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
-
8
- This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
-
11
- You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
-
13
- If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
- address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
- """
16
-
17
- import ctypes as ct
18
- from pathlib import Path
19
- from typing import Any, Dict, Optional, Union
20
-
21
- import numpy as np
22
- import pandas as pd
23
-
24
- from ._base import FMFile
25
-
26
-
27
- class ZZN(FMFile):
28
- """Reads and processes Flood Modeller 1D binary results format '.zzn'
29
-
30
- Args:
31
- zzn_filepath (str): Full filepath to model zzn file
32
-
33
- Output:
34
- Initiates 'ZZN' class object
35
- """
36
-
37
- _filetype: str = "ZZN"
38
- _suffix: str = ".zzn"
39
-
40
- def __init__(self, zzn_filepath: Optional[Union[str, Path]]):
41
- try:
42
- FMFile.__init__(self, zzn_filepath)
43
-
44
- # Get zzn_dll path
45
- zzn_dll = Path(Path(__file__).resolve().parent, "zzn_read.dll")
46
- # Using str() method as CDLL doesn't seem to like accepting Path object
47
- zzn_read = ct.CDLL(str(zzn_dll))
48
-
49
- # Get zzl path
50
- zzn = self._filepath
51
- zzl = zzn.with_suffix(".zzl")
52
- if not zzl.exists():
53
- raise FileNotFoundError(
54
- "Error: Could not find associated .ZZL file. Ensure that the zzn results have an associated zzl file with matching name."
55
- )
56
-
57
- self.meta: Dict[str, Any] = {} # Dict object to hold all metadata
58
- self.data = {} # Dict object to hold all data
59
-
60
- # PROCESS_ZZL
61
- self.meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)
62
- self.meta["zzn_name"] = ct.create_string_buffer(bytes(str(zzn), "utf-8"), 255)
63
- self.meta["model_title"] = ct.create_string_buffer(b"", 128)
64
- self.meta["nnodes"] = ct.c_int(0)
65
- self.meta["label_length"] = ct.c_int(0)
66
- self.meta["dt"] = ct.c_float(0.0)
67
- self.meta["timestep0"] = ct.c_int(0)
68
- self.meta["ltimestep"] = ct.c_int(0)
69
- self.meta["save_int"] = ct.c_float(0.0)
70
- self.meta["is_quality"] = ct.c_bool(False)
71
- self.meta["nvars"] = ct.c_int(0)
72
- self.meta["tzero"] = (ct.c_int * 5)()
73
- self.meta["errstat"] = ct.c_int(0)
74
- zzn_read.PROCESS_ZZL(
75
- ct.byref(self.meta["zzl_name"]),
76
- ct.byref(self.meta["model_title"]),
77
- ct.byref(self.meta["nnodes"]),
78
- ct.byref(self.meta["label_length"]),
79
- ct.byref(self.meta["dt"]),
80
- ct.byref(self.meta["timestep0"]),
81
- ct.byref(self.meta["ltimestep"]),
82
- ct.byref(self.meta["save_int"]),
83
- ct.byref(self.meta["is_quality"]),
84
- ct.byref(self.meta["nvars"]),
85
- ct.byref(self.meta["tzero"]),
86
- ct.byref(self.meta["errstat"]),
87
- )
88
- # PROCESS_LABELS
89
- self.meta["labels"] = (
90
- ct.c_char * self.meta["label_length"].value * self.meta["nnodes"].value
91
- )()
92
- zzn_read.PROCESS_LABELS(
93
- ct.byref(self.meta["zzl_name"]),
94
- ct.byref(self.meta["nnodes"]),
95
- ct.byref(self.meta["labels"]),
96
- ct.byref(self.meta["label_length"]),
97
- ct.byref(self.meta["errstat"]),
98
- )
99
- # PREPROCESS_ZZN
100
- last_hr = (
101
- (self.meta["ltimestep"].value - self.meta["timestep0"].value)
102
- * self.meta["dt"].value
103
- / 3600
104
- )
105
- self.meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
106
- self.meta["aitimestep"] = (ct.c_int * 2)(
107
- self.meta["timestep0"].value, self.meta["ltimestep"].value
108
- )
109
- self.meta["isavint"] = (ct.c_int * 2)()
110
- zzn_read.PREPROCESS_ZZN(
111
- ct.byref(self.meta["output_hrs"]),
112
- ct.byref(self.meta["aitimestep"]),
113
- ct.byref(self.meta["dt"]),
114
- ct.byref(self.meta["timestep0"]),
115
- ct.byref(self.meta["ltimestep"]),
116
- ct.byref(self.meta["save_int"]),
117
- ct.byref(self.meta["isavint"]),
118
- )
119
- # PROCESS_ZZN
120
- self.meta["node_ID"] = ct.c_int(-1)
121
- self.meta["savint_skip"] = ct.c_int(1)
122
- self.meta["savint_range"] = ct.c_int(
123
- int(
124
- (
125
- (self.meta["isavint"][1] - self.meta["isavint"][0])
126
- / self.meta["savint_skip"].value
127
- )
128
- )
129
- )
130
- nx = self.meta["nnodes"].value
131
- ny = self.meta["nvars"].value
132
- nz = self.meta["savint_range"].value + 1
133
- self.data["all_results"] = (ct.c_float * nx * ny * nz)()
134
- self.data["max_results"] = (ct.c_float * nx * ny)()
135
- self.data["min_results"] = (ct.c_float * nx * ny)()
136
- self.data["max_times"] = (ct.c_int * nx * ny)()
137
- self.data["min_times"] = (ct.c_int * nx * ny)()
138
- zzn_read.PROCESS_ZZN(
139
- ct.byref(self.meta["zzn_name"]),
140
- ct.byref(self.meta["node_ID"]),
141
- ct.byref(self.meta["nnodes"]),
142
- ct.byref(self.meta["is_quality"]),
143
- ct.byref(self.meta["nvars"]),
144
- ct.byref(self.meta["savint_range"]),
145
- ct.byref(self.meta["savint_skip"]),
146
- ct.byref(self.data["all_results"]),
147
- ct.byref(self.data["max_results"]),
148
- ct.byref(self.data["min_results"]),
149
- ct.byref(self.data["max_times"]),
150
- ct.byref(self.data["min_times"]),
151
- ct.byref(self.meta["errstat"]),
152
- ct.byref(self.meta["isavint"]),
153
- )
154
-
155
- # Convert useful metadata from C types into python types
156
-
157
- self.meta["dt"] = self.meta["dt"].value
158
- self.meta["nnodes"] = self.meta["nnodes"].value
159
- self.meta["save_int"] = self.meta["save_int"].value
160
- self.meta["nvars"] = self.meta["nvars"].value
161
- self.meta["savint_range"] = self.meta["savint_range"].value
162
-
163
- self.meta["zzn_name"] = self.meta["zzn_name"].value.decode()
164
- self.meta["labels"] = [
165
- label.value.decode().strip() for label in list(self.meta["labels"])
166
- ]
167
- self.meta["model_title"] = self.meta["model_title"].value.decode()
168
-
169
- except Exception as e:
170
- self._handle_exception(e, when="read")
171
-
172
- def to_dataframe(
173
- self,
174
- result_type: str = "all",
175
- variable: str = "all",
176
- include_time: bool = False,
177
- multilevel_header: bool = True,
178
- ) -> Union[pd.Series, pd.DataFrame]:
179
- """Loads zzn results to pandas dataframe object.
180
-
181
- Args:
182
- result_type (str, optional): {'all'} | 'max' | 'min'
183
- Define whether to return all timesteps or just max/min results. Defaults to 'all'.
184
- variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
185
- Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
186
- include_time (bool, optional):
187
- Whether to include the time of max or min results. Defaults to False.
188
- multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
189
- headers with the variable as first level and node label as second header. If False, the column
190
- names will be formatted "{node label}_{variable}". Defaults to True.
191
-
192
- Returns:
193
- pandas.DataFrame(): dataframe object of simulation results
194
- """
195
- nx = self.meta["nnodes"]
196
- ny = self.meta["nvars"]
197
- nz = self.meta["savint_range"] + 1
198
- result_type = result_type.lower()
199
-
200
- if result_type == "all":
201
- arr = np.array(self.data["all_results"])
202
- time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
203
- vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
204
- if multilevel_header:
205
- col_names = [vars_list, self.meta["labels"]]
206
- df = pd.DataFrame(
207
- arr.reshape(nz, nx * ny),
208
- index=time_index,
209
- columns=pd.MultiIndex.from_product(col_names),
210
- )
211
- df.index.name = "Time (hr)"
212
- if variable != "all":
213
- return df[variable.capitalize()]
214
-
215
- else:
216
- col_names = [f"{node}_{var}" for var in vars_list for node in self.meta["labels"]]
217
- df = pd.DataFrame(arr.reshape(nz, nx * ny), index=time_index, columns=col_names)
218
- df.index.name = "Time (hr)"
219
- if variable != "all":
220
- use_cols = [col for col in df.columns if col.endswith(variable.capitalize())]
221
- return df[use_cols]
222
- return df
223
-
224
- if result_type in ("max", "min"):
225
- arr = np.array(self.data[f"{result_type}_results"]).transpose()
226
- node_index = self.meta["labels"]
227
- col_names = [
228
- result_type.capitalize() + lbl
229
- for lbl in [
230
- " Flow",
231
- " Stage",
232
- " Froude",
233
- " Velocity",
234
- " Mode",
235
- " State",
236
- ]
237
- ]
238
- df = pd.DataFrame(arr, index=node_index, columns=col_names)
239
- df.index.name = "Node Label"
240
-
241
- if include_time:
242
- times = np.array(self.data[f"{result_type}_times"]).transpose()
243
- # transform timestep into hrs
244
- times = ((times - self.meta["timestep0"]) * self.meta["dt"]) / 3600
245
- time_col_names = [name + " Time(hrs)" for name in col_names]
246
- time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
247
- time_df.index.name = "Node Label"
248
- df = pd.concat([df, time_df], axis=1)
249
- new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
250
- df = df[new_col_order]
251
- if variable != "all":
252
- return df[
253
- [
254
- f"{result_type.capitalize()} {variable.capitalize()}",
255
- f"{result_type.capitalize()} {variable.capitalize()} Time(hrs)",
256
- ]
257
- ]
258
- return df
259
-
260
- if variable != "all":
261
- return df[f"{result_type.capitalize()} {variable.capitalize()}"]
262
- return df
263
-
264
- raise ValueError(f'Result type: "{result_type}" not recognised')
265
-
266
- def export_to_csv(
267
- self,
268
- save_location: Union[str, Path] = "default",
269
- result_type: str = "all",
270
- variable: str = "all",
271
- include_time: bool = False,
272
- ) -> None:
273
- """Exports zzn results to CSV file.
274
-
275
- Args:
276
- save_location (str, optional): {default} | folder or file path
277
- Full or relative path to folder or csv file to save output csv, if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file. Defaults to 'default'.
278
- result_type (str, optional): {all} | max | min
279
- Define whether to output all timesteps or just max/min results. Defaults to 'all'.
280
- variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
281
- Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
282
- include_time (bool, optional):
283
- Whether to include the time of max or min results. Defaults to False.
284
-
285
- Raises:
286
- Exception: Raised if result_type set to invalid option
287
- """
288
- if save_location == "default":
289
- save_location = Path(self.meta["zzn_name"]).with_suffix(".csv")
290
- else:
291
- save_location = Path(save_location)
292
- if not save_location.is_absolute():
293
- # for if relative folder path given
294
- save_location = Path(Path(self.meta["zzn_name"]).parent, save_location)
295
-
296
- if save_location.suffix != ".csv": # Assumed to be pointing to a folder
297
- # Check if the folder exists, if not create it
298
- if not save_location.exists():
299
- Path.mkdir(save_location)
300
- save_location = Path(
301
- save_location, Path(self.meta["zzn_name"]).with_suffix(".csv").name
302
- )
303
-
304
- else:
305
- if not save_location.parent.exists():
306
- Path.mkdir(save_location.parent)
307
-
308
- result_type = result_type.lower()
309
-
310
- if result_type.lower() not in ["all", "max", "min"]:
311
- raise Exception(
312
- f" '{result_type}' is not a valid result type. Valid arguments are: 'all', 'max' or 'min' "
313
- )
314
-
315
- df = self.to_dataframe(
316
- result_type=result_type, variable=variable, include_time=include_time
317
- )
318
- df.to_csv(save_location)
319
- print(f"CSV saved to {save_location}")
320
-
321
- def to_dict_of_dataframes(self, variable: str = "all") -> dict:
322
- """Loads zzn results to a dictionary of pandas dataframe objects.
323
-
324
- Args:
325
- variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
326
- Specify a single output variable (e.g 'flow' or 'stage') or any combination passed as comma separated
327
- variable names. Defaults to 'all'.
328
-
329
- Returns:
330
- dict: dictionary of dataframe object of simulation results, keys corresponding to variables.
331
- """
332
- nx = self.meta["nnodes"]
333
- ny = self.meta["nvars"]
334
- nz = self.meta["savint_range"] + 1
335
- output = {}
336
-
337
- arr = np.array(self.data["all_results"])
338
- time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
339
-
340
- vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
341
-
342
- col_names = self.meta["labels"]
343
- temp_arr = np.reshape(arr, (nz, ny, nx))
344
-
345
- for i, var in enumerate(vars_list):
346
- output[var] = pd.DataFrame(temp_arr[:, i, :], index=time_index, columns=col_names)
347
- output[var].index.name = "Time (hr)"
348
-
349
- output["Time (hr)"] = time_index
350
-
351
- if variable != "all":
352
- input_vars = variable.split(",")
353
- for i, var in enumerate(input_vars):
354
- input_vars[i] = var.strip().capitalize()
355
- if input_vars[i] not in vars_list:
356
- raise Exception(
357
- f" '{input_vars[i]}' is not a valid variable name. Valid arguments are: {vars_list} "
358
- )
359
-
360
- for var in vars_list:
361
- if var not in input_vars:
362
- del output[var]
363
- return output
1
+ """
2
+ Flood Modeller Python API
3
+ Copyright (C) 2024 Jacobs U.K. Limited
4
+
5
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
+
8
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
+
11
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
+
13
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import ctypes as ct
20
+ from pathlib import Path
21
+ from typing import Any
22
+
23
+ import numpy as np
24
+ import pandas as pd
25
+
26
+ from ._base import FMFile
27
+ from .to_from_json import to_json
28
+ from .util import handle_exception, is_windows
29
+
30
+
31
+ class ZZN(FMFile):
32
+ """Reads and processes Flood Modeller 1D binary results format '.zzn'
33
+
34
+ Args:
35
+ zzn_filepath (str): Full filepath to model zzn file
36
+
37
+ Output:
38
+ Initiates 'ZZN' class object
39
+ """
40
+
41
+ _filetype: str = "ZZN"
42
+ _suffix: str = ".zzn"
43
+
44
+ @handle_exception(when="read")
45
+ def __init__( # noqa: PLR0915
46
+ self,
47
+ zzn_filepath: str | Path | None = None,
48
+ from_json: bool = False,
49
+ ):
50
+ if from_json:
51
+ return
52
+ FMFile.__init__(self, zzn_filepath)
53
+
54
+ # Get zzn_dll path
55
+ lib = "zzn_read.dll" if is_windows() else "libzzn_read.so"
56
+ zzn_dll = Path(__file__).resolve().parent / "libs" / lib
57
+
58
+ # Catch LD_LIBRARY_PATH error for linux
59
+ try:
60
+ zzn_read = ct.CDLL(str(zzn_dll))
61
+ except OSError as e:
62
+ msg_1 = "libifport.so.5: cannot open shared object file: No such file or directory"
63
+ if msg_1 in str(e):
64
+ msg_2 = "Set LD_LIBRARY_PATH environment variable to be floodmodeller_api/lib"
65
+ raise OSError(msg_2) from e
66
+ raise
67
+
68
+ # Get zzl path
69
+ zzn = self._filepath
70
+ zzl = zzn.with_suffix(".zzl")
71
+ if not zzl.exists():
72
+ raise FileNotFoundError(
73
+ "Error: Could not find associated .ZZL file. Ensure that the zzn results have an associated zzl file with matching name.",
74
+ )
75
+
76
+ self.meta: dict[str, Any] = {} # Dict object to hold all metadata
77
+ self.data = {} # Dict object to hold all data
78
+
79
+ # PROCESS_ZZL
80
+ self.meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)
81
+ self.meta["zzn_name"] = ct.create_string_buffer(bytes(str(zzn), "utf-8"), 255)
82
+ self.meta["model_title"] = ct.create_string_buffer(b"", 128)
83
+ self.meta["nnodes"] = ct.c_int(0)
84
+ self.meta["label_length"] = ct.c_int(0)
85
+ self.meta["dt"] = ct.c_float(0.0)
86
+ self.meta["timestep0"] = ct.c_int(0)
87
+ self.meta["ltimestep"] = ct.c_int(0)
88
+ self.meta["save_int"] = ct.c_float(0.0)
89
+ self.meta["is_quality"] = ct.c_bool(False)
90
+ self.meta["nvars"] = ct.c_int(0)
91
+ self.meta["tzero"] = (ct.c_int * 5)()
92
+ self.meta["errstat"] = ct.c_int(0)
93
+ zzn_read.process_zzl(
94
+ ct.byref(self.meta["zzl_name"]),
95
+ ct.byref(self.meta["model_title"]),
96
+ ct.byref(self.meta["nnodes"]),
97
+ ct.byref(self.meta["label_length"]),
98
+ ct.byref(self.meta["dt"]),
99
+ ct.byref(self.meta["timestep0"]),
100
+ ct.byref(self.meta["ltimestep"]),
101
+ ct.byref(self.meta["save_int"]),
102
+ ct.byref(self.meta["is_quality"]),
103
+ ct.byref(self.meta["nvars"]),
104
+ ct.byref(self.meta["tzero"]),
105
+ ct.byref(self.meta["errstat"]),
106
+ )
107
+ # PROCESS_LABELS
108
+ self.meta["labels"] = (
109
+ ct.c_char * self.meta["label_length"].value * self.meta["nnodes"].value
110
+ )()
111
+ zzn_read.process_labels(
112
+ ct.byref(self.meta["zzl_name"]),
113
+ ct.byref(self.meta["nnodes"]),
114
+ ct.byref(self.meta["label_length"]),
115
+ ct.byref(self.meta["errstat"]),
116
+ )
117
+ for i in range(self.meta["nnodes"].value):
118
+ zzn_read.get_zz_label(
119
+ ct.byref(ct.c_int(i + 1)),
120
+ ct.byref(self.meta["labels"][i]),
121
+ ct.byref(self.meta["errstat"]),
122
+ )
123
+ # PREPROCESS_ZZN
124
+ last_hr = (
125
+ (self.meta["ltimestep"].value - self.meta["timestep0"].value)
126
+ * self.meta["dt"].value
127
+ / 3600
128
+ )
129
+ self.meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
130
+ self.meta["aitimestep"] = (ct.c_int * 2)(
131
+ self.meta["timestep0"].value,
132
+ self.meta["ltimestep"].value,
133
+ )
134
+ self.meta["isavint"] = (ct.c_int * 2)()
135
+ zzn_read.preprocess_zzn(
136
+ ct.byref(self.meta["output_hrs"]),
137
+ ct.byref(self.meta["aitimestep"]),
138
+ ct.byref(self.meta["dt"]),
139
+ ct.byref(self.meta["timestep0"]),
140
+ ct.byref(self.meta["ltimestep"]),
141
+ ct.byref(self.meta["save_int"]),
142
+ ct.byref(self.meta["isavint"]),
143
+ )
144
+ # PROCESS_ZZN
145
+ self.meta["node_ID"] = ct.c_int(-1)
146
+ self.meta["savint_skip"] = ct.c_int(1)
147
+ self.meta["savint_range"] = ct.c_int(
148
+ int(
149
+ (self.meta["isavint"][1] - self.meta["isavint"][0])
150
+ / self.meta["savint_skip"].value,
151
+ ),
152
+ )
153
+ nx = self.meta["nnodes"].value
154
+ ny = self.meta["nvars"].value
155
+ nz = self.meta["savint_range"].value + 1
156
+ self.data["all_results"] = (ct.c_float * nx * ny * nz)()
157
+ self.data["max_results"] = (ct.c_float * nx * ny)()
158
+ self.data["min_results"] = (ct.c_float * nx * ny)()
159
+ self.data["max_times"] = (ct.c_int * nx * ny)()
160
+ self.data["min_times"] = (ct.c_int * nx * ny)()
161
+ zzn_read.process_zzn(
162
+ ct.byref(self.meta["zzn_name"]),
163
+ ct.byref(self.meta["node_ID"]),
164
+ ct.byref(self.meta["nnodes"]),
165
+ ct.byref(self.meta["is_quality"]),
166
+ ct.byref(self.meta["nvars"]),
167
+ ct.byref(self.meta["savint_range"]),
168
+ ct.byref(self.meta["savint_skip"]),
169
+ ct.byref(self.data["all_results"]),
170
+ ct.byref(self.data["max_results"]),
171
+ ct.byref(self.data["min_results"]),
172
+ ct.byref(self.data["max_times"]),
173
+ ct.byref(self.data["min_times"]),
174
+ ct.byref(self.meta["errstat"]),
175
+ ct.byref(self.meta["isavint"]),
176
+ )
177
+
178
+ # Convert useful metadata from C types into python types
179
+
180
+ self.meta["dt"] = self.meta["dt"].value
181
+ self.meta["nnodes"] = self.meta["nnodes"].value
182
+ self.meta["save_int"] = self.meta["save_int"].value
183
+ self.meta["nvars"] = self.meta["nvars"].value
184
+ self.meta["savint_range"] = self.meta["savint_range"].value
185
+
186
+ self.meta["zzn_name"] = self.meta["zzn_name"].value.decode()
187
+ self.meta["labels"] = [label.value.decode().strip() for label in list(self.meta["labels"])]
188
+ self.meta["model_title"] = self.meta["model_title"].value.decode()
189
+
190
+ def to_dataframe( # noqa: PLR0911
191
+ self,
192
+ result_type: str = "all",
193
+ variable: str = "all",
194
+ include_time: bool = False,
195
+ multilevel_header: bool = True,
196
+ ) -> pd.Series | pd.DataFrame:
197
+ """Loads zzn results to pandas dataframe object.
198
+
199
+ Args:
200
+ result_type (str, optional): {'all'} | 'max' | 'min'
201
+ Define whether to return all timesteps or just max/min results. Defaults to 'all'.
202
+ variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
203
+ Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
204
+ include_time (bool, optional):
205
+ Whether to include the time of max or min results. Defaults to False.
206
+ multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
207
+ headers with the variable as first level and node label as second header. If False, the column
208
+ names will be formatted "{node label}_{variable}". Defaults to True.
209
+
210
+ Returns:
211
+ pandas.DataFrame(): dataframe object of simulation results
212
+ """
213
+ nx = self.meta["nnodes"]
214
+ ny = self.meta["nvars"]
215
+ nz = self.meta["savint_range"] + 1
216
+ result_type = result_type.lower()
217
+
218
+ if result_type == "all":
219
+ arr = np.array(self.data["all_results"])
220
+ time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
221
+ vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
222
+ if multilevel_header:
223
+ col_names = [vars_list, self.meta["labels"]]
224
+ df = pd.DataFrame(
225
+ arr.reshape(nz, nx * ny),
226
+ index=time_index,
227
+ columns=pd.MultiIndex.from_product(col_names),
228
+ )
229
+ df.index.name = "Time (hr)"
230
+ if variable != "all":
231
+ return df[variable.capitalize()]
232
+
233
+ else:
234
+ col_names = [f"{node}_{var}" for var in vars_list for node in self.meta["labels"]]
235
+ df = pd.DataFrame(arr.reshape(nz, nx * ny), index=time_index, columns=col_names)
236
+ df.index.name = "Time (hr)"
237
+ if variable != "all":
238
+ use_cols = [col for col in df.columns if col.endswith(variable.capitalize())]
239
+ return df[use_cols]
240
+ return df
241
+
242
+ if result_type in ("max", "min"):
243
+ arr = np.array(self.data[f"{result_type}_results"]).transpose()
244
+ node_index = self.meta["labels"]
245
+ col_names = [
246
+ result_type.capitalize() + lbl
247
+ for lbl in [
248
+ " Flow",
249
+ " Stage",
250
+ " Froude",
251
+ " Velocity",
252
+ " Mode",
253
+ " State",
254
+ ]
255
+ ]
256
+ df = pd.DataFrame(arr, index=node_index, columns=col_names)
257
+ df.index.name = "Node Label"
258
+
259
+ if include_time:
260
+ times = np.array(self.data[f"{result_type}_times"]).transpose()
261
+ # transform timestep into hrs
262
+ times = ((times - self.meta["timestep0"]) * self.meta["dt"]) / 3600
263
+ time_col_names = [name + " Time(hrs)" for name in col_names]
264
+ time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
265
+ time_df.index.name = "Node Label"
266
+ df = pd.concat([df, time_df], axis=1)
267
+ new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
268
+ df = df[new_col_order]
269
+ if variable != "all":
270
+ return df[
271
+ [
272
+ f"{result_type.capitalize()} {variable.capitalize()}",
273
+ f"{result_type.capitalize()} {variable.capitalize()} Time(hrs)",
274
+ ]
275
+ ]
276
+ return df
277
+
278
+ if variable != "all":
279
+ return df[f"{result_type.capitalize()} {variable.capitalize()}"]
280
+ return df
281
+
282
+ raise ValueError(f'Result type: "{result_type}" not recognised')
283
+
284
+ def export_to_csv(
285
+ self,
286
+ save_location: str | Path = "default",
287
+ result_type: str = "all",
288
+ variable: str = "all",
289
+ include_time: bool = False,
290
+ ) -> None:
291
+ """Exports zzn results to CSV file.
292
+
293
+ Args:
294
+ save_location (str, optional): {default} | folder or file path
295
+ Full or relative path to folder or csv file to save output csv, if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file. Defaults to 'default'.
296
+ result_type (str, optional): {all} | max | min
297
+ Define whether to output all timesteps or just max/min results. Defaults to 'all'.
298
+ variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
299
+ Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
300
+ include_time (bool, optional):
301
+ Whether to include the time of max or min results. Defaults to False.
302
+
303
+ Raises:
304
+ Exception: Raised if result_type set to invalid option
305
+ """
306
+ if save_location == "default":
307
+ save_location = Path(self.meta["zzn_name"]).with_suffix(".csv")
308
+ else:
309
+ save_location = Path(save_location)
310
+ if not save_location.is_absolute():
311
+ # for if relative folder path given
312
+ save_location = Path(Path(self.meta["zzn_name"]).parent, save_location)
313
+
314
+ if save_location.suffix != ".csv": # Assumed to be pointing to a folder
315
+ # Check if the folder exists, if not create it
316
+ if not save_location.exists():
317
+ Path.mkdir(save_location)
318
+ save_location = Path(
319
+ save_location,
320
+ Path(self.meta["zzn_name"]).with_suffix(".csv").name,
321
+ )
322
+
323
+ elif not save_location.parent.exists():
324
+ Path.mkdir(save_location.parent)
325
+
326
+ result_type = result_type.lower()
327
+
328
+ if result_type.lower() not in ["all", "max", "min"]:
329
+ raise Exception(
330
+ f" '{result_type}' is not a valid result type. Valid arguments are: 'all', 'max' or 'min' ",
331
+ )
332
+
333
+ df = self.to_dataframe(
334
+ result_type=result_type,
335
+ variable=variable,
336
+ include_time=include_time,
337
+ )
338
+ df.to_csv(save_location)
339
+ print(f"CSV saved to {save_location}")
340
+
341
def to_dict_of_dataframes(self, variable: str = "all") -> dict:
    """Loads zzn results to a dictionary of pandas dataframe objects.

    Args:
        variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
            Specify a single output variable (e.g 'flow' or 'stage') or any combination passed as comma separated
            variable names. Defaults to 'all'.

    Returns:
        dict: dictionary of dataframe object of simulation results, keys corresponding to variables.
        Always also contains a "Time (hr)" key holding the time index array.

    Raises:
        ValueError: If any requested variable name is not recognised.
    """
    nx = self.meta["nnodes"]
    ny = self.meta["nvars"]
    nz = self.meta["savint_range"] + 1  # number of saved timesteps

    arr = np.array(self.data["all_results"])
    time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)

    vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
    col_names = self.meta["labels"]
    # Reshape flat results to (timestep, variable, node) for per-variable slicing.
    temp_arr = np.reshape(arr, (nz, ny, nx))

    output = {}
    for i, var in enumerate(vars_list):
        output[var] = pd.DataFrame(temp_arr[:, i, :], index=time_index, columns=col_names)
        output[var].index.name = "Time (hr)"

    output["Time (hr)"] = time_index

    if variable != "all":
        # Normalise the comma-separated request, then validate before filtering.
        input_vars = [raw.strip().capitalize() for raw in variable.split(",")]
        for requested in input_vars:
            if requested not in vars_list:
                # ValueError (not bare Exception) for consistency with to_dataframe.
                raise ValueError(
                    f" '{requested}' is not a valid variable name. Valid arguments are: {vars_list} ",
                )

        for var in vars_list:
            if var not in input_vars:
                del output[var]
    return output
384
+
385
def to_json(
    self,
    result_type: str = "all",
    variable: str = "all",
    include_time: bool = False,
    multilevel_header: bool = True,
) -> str:
    """Serialise zzn results to a JSON string.

    Delegates to :meth:`to_dataframe` and converts the resulting frame with
    the package-level ``to_json`` helper.

    Args:
        result_type (str, optional): {'all'} | 'max' | 'min'
            Define whether to return all timesteps or just max/min results. Defaults to 'all'.
        variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
            Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
        include_time (bool, optional):
            Whether to include the time of max or min results. Defaults to False.
        multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
            headers with the variable as first level and node label as second header. If False, the column
            names will be formatted "{node label}_{variable}". Defaults to True.

    Returns:
        str: A JSON string representing the ZZN results.
    """
    frame = self.to_dataframe(result_type, variable, include_time, multilevel_header)
    return to_json(frame)
410
+
411
@classmethod
def from_json(cls, json_string: str = ""):
    """Unsupported: a ZZN instance cannot be reconstructed from JSON.

    Raises:
        NotImplementedError: Always.
    """
    raise NotImplementedError("It is not possible to build a ZZN class instance from JSON")