floodmodeller-api 0.4.2.post1__py3-none-any.whl → 0.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (172) hide show
  1. floodmodeller_api/__init__.py +8 -9
  2. floodmodeller_api/_base.py +184 -176
  3. floodmodeller_api/backup.py +273 -273
  4. floodmodeller_api/dat.py +909 -831
  5. floodmodeller_api/diff.py +136 -119
  6. floodmodeller_api/ied.py +307 -306
  7. floodmodeller_api/ief.py +647 -637
  8. floodmodeller_api/ief_flags.py +253 -253
  9. floodmodeller_api/inp.py +266 -266
  10. floodmodeller_api/libs/libifcoremd.dll +0 -0
  11. floodmodeller_api/libs/libifcoremt.so.5 +0 -0
  12. floodmodeller_api/libs/libifport.so.5 +0 -0
  13. floodmodeller_api/{libmmd.dll → libs/libimf.so} +0 -0
  14. floodmodeller_api/libs/libintlc.so.5 +0 -0
  15. floodmodeller_api/libs/libmmd.dll +0 -0
  16. floodmodeller_api/libs/libsvml.so +0 -0
  17. floodmodeller_api/libs/libzzn_read.so +0 -0
  18. floodmodeller_api/libs/zzn_read.dll +0 -0
  19. floodmodeller_api/logs/__init__.py +2 -2
  20. floodmodeller_api/logs/lf.py +320 -312
  21. floodmodeller_api/logs/lf_helpers.py +354 -352
  22. floodmodeller_api/logs/lf_params.py +643 -529
  23. floodmodeller_api/mapping.py +84 -0
  24. floodmodeller_api/test/__init__.py +4 -4
  25. floodmodeller_api/test/conftest.py +9 -8
  26. floodmodeller_api/test/test_backup.py +117 -117
  27. floodmodeller_api/test/test_dat.py +221 -92
  28. floodmodeller_api/test/test_data/All Units 4_6.DAT +1081 -1081
  29. floodmodeller_api/test/test_data/All Units 4_6.feb +1081 -1081
  30. floodmodeller_api/test/test_data/BRIDGE.DAT +926 -926
  31. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.dat +36 -36
  32. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.feb +36 -36
  33. floodmodeller_api/test/test_data/DamBreakADI.xml +52 -52
  34. floodmodeller_api/test/test_data/DamBreakFAST.xml +58 -58
  35. floodmodeller_api/test/test_data/DamBreakFAST_dy.xml +53 -53
  36. floodmodeller_api/test/test_data/DamBreakTVD.xml +55 -55
  37. floodmodeller_api/test/test_data/DefenceBreach.xml +53 -53
  38. floodmodeller_api/test/test_data/DefenceBreachFAST.xml +60 -60
  39. floodmodeller_api/test/test_data/DefenceBreachFAST_dy.xml +55 -55
  40. floodmodeller_api/test/test_data/Domain1+2_QH.xml +76 -76
  41. floodmodeller_api/test/test_data/Domain1_H.xml +41 -41
  42. floodmodeller_api/test/test_data/Domain1_Q.xml +41 -41
  43. floodmodeller_api/test/test_data/Domain1_Q_FAST.xml +48 -48
  44. floodmodeller_api/test/test_data/Domain1_Q_FAST_dy.xml +48 -48
  45. floodmodeller_api/test/test_data/Domain1_Q_xml_expected.json +263 -0
  46. floodmodeller_api/test/test_data/Domain1_W.xml +41 -41
  47. floodmodeller_api/test/test_data/EX1.DAT +321 -321
  48. floodmodeller_api/test/test_data/EX1.ext +107 -107
  49. floodmodeller_api/test/test_data/EX1.feb +320 -320
  50. floodmodeller_api/test/test_data/EX1.gxy +107 -107
  51. floodmodeller_api/test/test_data/EX17.DAT +421 -422
  52. floodmodeller_api/test/test_data/EX17.ext +213 -213
  53. floodmodeller_api/test/test_data/EX17.feb +422 -422
  54. floodmodeller_api/test/test_data/EX18.DAT +375 -375
  55. floodmodeller_api/test/test_data/EX18_DAT_expected.json +3876 -0
  56. floodmodeller_api/test/test_data/EX2.DAT +302 -302
  57. floodmodeller_api/test/test_data/EX3.DAT +926 -926
  58. floodmodeller_api/test/test_data/EX3_DAT_expected.json +16235 -0
  59. floodmodeller_api/test/test_data/EX3_IEF_expected.json +61 -0
  60. floodmodeller_api/test/test_data/EX6.DAT +2084 -2084
  61. floodmodeller_api/test/test_data/EX6.ext +532 -532
  62. floodmodeller_api/test/test_data/EX6.feb +2084 -2084
  63. floodmodeller_api/test/test_data/EX6_DAT_expected.json +31647 -0
  64. floodmodeller_api/test/test_data/Event Data Example.DAT +336 -336
  65. floodmodeller_api/test/test_data/Event Data Example.ext +107 -107
  66. floodmodeller_api/test/test_data/Event Data Example.feb +336 -336
  67. floodmodeller_api/test/test_data/Linked1D2D.xml +52 -52
  68. floodmodeller_api/test/test_data/Linked1D2DFAST.xml +53 -53
  69. floodmodeller_api/test/test_data/Linked1D2DFAST_dy.xml +48 -48
  70. floodmodeller_api/test/test_data/Linked1D2D_xml_expected.json +313 -0
  71. floodmodeller_api/test/test_data/blockage.dat +50 -50
  72. floodmodeller_api/test/test_data/blockage.ext +45 -45
  73. floodmodeller_api/test/test_data/blockage.feb +9 -9
  74. floodmodeller_api/test/test_data/blockage.gxy +71 -71
  75. floodmodeller_api/test/test_data/defaultUnits.dat +127 -127
  76. floodmodeller_api/test/test_data/defaultUnits.ext +45 -45
  77. floodmodeller_api/test/test_data/defaultUnits.feb +9 -9
  78. floodmodeller_api/test/test_data/defaultUnits.fmpx +58 -58
  79. floodmodeller_api/test/test_data/defaultUnits.gxy +85 -85
  80. floodmodeller_api/test/test_data/ex3.ief +20 -20
  81. floodmodeller_api/test/test_data/ex3.lf1 +2800 -2800
  82. floodmodeller_api/test/test_data/ex4.DAT +1374 -1374
  83. floodmodeller_api/test/test_data/ex4_changed.DAT +1374 -1374
  84. floodmodeller_api/test/test_data/example1.inp +329 -329
  85. floodmodeller_api/test/test_data/example2.inp +158 -158
  86. floodmodeller_api/test/test_data/example3.inp +297 -297
  87. floodmodeller_api/test/test_data/example4.inp +388 -388
  88. floodmodeller_api/test/test_data/example5.inp +147 -147
  89. floodmodeller_api/test/test_data/example6.inp +154 -154
  90. floodmodeller_api/test/test_data/jump.dat +176 -176
  91. floodmodeller_api/test/test_data/network.dat +1374 -1374
  92. floodmodeller_api/test/test_data/network.ext +45 -45
  93. floodmodeller_api/test/test_data/network.exy +1 -1
  94. floodmodeller_api/test/test_data/network.feb +45 -45
  95. floodmodeller_api/test/test_data/network.ied +45 -45
  96. floodmodeller_api/test/test_data/network.ief +20 -20
  97. floodmodeller_api/test/test_data/network.inp +147 -147
  98. floodmodeller_api/test/test_data/network.pxy +57 -57
  99. floodmodeller_api/test/test_data/network.zzd +122 -122
  100. floodmodeller_api/test/test_data/network_dat_expected.json +21837 -0
  101. floodmodeller_api/test/test_data/network_from_tabularCSV.csv +87 -87
  102. floodmodeller_api/test/test_data/network_ied_expected.json +287 -0
  103. floodmodeller_api/test/test_data/rnweir.dat +9 -9
  104. floodmodeller_api/test/test_data/rnweir.ext +45 -45
  105. floodmodeller_api/test/test_data/rnweir.feb +9 -9
  106. floodmodeller_api/test/test_data/rnweir.gxy +45 -45
  107. floodmodeller_api/test/test_data/rnweir_default.dat +74 -74
  108. floodmodeller_api/test/test_data/rnweir_default.ext +45 -45
  109. floodmodeller_api/test/test_data/rnweir_default.feb +9 -9
  110. floodmodeller_api/test/test_data/rnweir_default.fmpx +58 -58
  111. floodmodeller_api/test/test_data/rnweir_default.gxy +53 -53
  112. floodmodeller_api/test/test_data/unit checks.dat +16 -16
  113. floodmodeller_api/test/test_ied.py +29 -29
  114. floodmodeller_api/test/test_ief.py +125 -24
  115. floodmodeller_api/test/test_inp.py +47 -48
  116. floodmodeller_api/test/test_json.py +114 -0
  117. floodmodeller_api/test/test_logs_lf.py +48 -51
  118. floodmodeller_api/test/test_tool.py +165 -152
  119. floodmodeller_api/test/test_toolbox_structure_log.py +234 -239
  120. floodmodeller_api/test/test_xml2d.py +151 -156
  121. floodmodeller_api/test/test_zzn.py +36 -34
  122. floodmodeller_api/to_from_json.py +218 -0
  123. floodmodeller_api/tool.py +332 -329
  124. floodmodeller_api/toolbox/__init__.py +5 -5
  125. floodmodeller_api/toolbox/example_tool.py +45 -45
  126. floodmodeller_api/toolbox/model_build/__init__.py +2 -2
  127. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +100 -98
  128. floodmodeller_api/toolbox/model_build/structure_log/__init__.py +1 -1
  129. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +287 -289
  130. floodmodeller_api/toolbox/model_build/structure_log_definition.py +76 -76
  131. floodmodeller_api/units/__init__.py +10 -10
  132. floodmodeller_api/units/_base.py +214 -212
  133. floodmodeller_api/units/boundaries.py +467 -467
  134. floodmodeller_api/units/comment.py +52 -55
  135. floodmodeller_api/units/conduits.py +382 -402
  136. floodmodeller_api/units/helpers.py +123 -131
  137. floodmodeller_api/units/iic.py +107 -101
  138. floodmodeller_api/units/losses.py +305 -306
  139. floodmodeller_api/units/sections.py +444 -446
  140. floodmodeller_api/units/structures.py +1690 -1683
  141. floodmodeller_api/units/units.py +93 -104
  142. floodmodeller_api/units/unsupported.py +44 -44
  143. floodmodeller_api/units/variables.py +87 -89
  144. floodmodeller_api/urban1d/__init__.py +11 -11
  145. floodmodeller_api/urban1d/_base.py +188 -179
  146. floodmodeller_api/urban1d/conduits.py +93 -85
  147. floodmodeller_api/urban1d/general_parameters.py +58 -58
  148. floodmodeller_api/urban1d/junctions.py +81 -79
  149. floodmodeller_api/urban1d/losses.py +81 -74
  150. floodmodeller_api/urban1d/outfalls.py +114 -110
  151. floodmodeller_api/urban1d/raingauges.py +111 -111
  152. floodmodeller_api/urban1d/subsections.py +92 -98
  153. floodmodeller_api/urban1d/xsections.py +147 -144
  154. floodmodeller_api/util.py +77 -21
  155. floodmodeller_api/validation/parameters.py +660 -660
  156. floodmodeller_api/validation/urban_parameters.py +388 -404
  157. floodmodeller_api/validation/validation.py +110 -108
  158. floodmodeller_api/version.py +1 -1
  159. floodmodeller_api/xml2d.py +688 -673
  160. floodmodeller_api/xml2d_template.py +37 -37
  161. floodmodeller_api/zzn.py +387 -363
  162. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.3.dist-info}/LICENSE.txt +13 -13
  163. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.3.dist-info}/METADATA +82 -82
  164. floodmodeller_api-0.4.3.dist-info/RECORD +179 -0
  165. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.3.dist-info}/WHEEL +1 -1
  166. floodmodeller_api/libifcoremd.dll +0 -0
  167. floodmodeller_api/test/test_data/EX3.bmp +0 -0
  168. floodmodeller_api/test/test_data/test_output.csv +0 -87
  169. floodmodeller_api/zzn_read.dll +0 -0
  170. floodmodeller_api-0.4.2.post1.dist-info/RECORD +0 -164
  171. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.3.dist-info}/entry_points.txt +0 -0
  172. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.3.dist-info}/top_level.txt +0 -0
floodmodeller_api/zzn.py CHANGED
@@ -1,363 +1,387 @@
1
- """
2
- Flood Modeller Python API
3
- Copyright (C) 2023 Jacobs U.K. Limited
4
-
5
- This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
- as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
-
8
- This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
-
11
- You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
-
13
- If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
- address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
- """
16
-
17
- import ctypes as ct
18
- from pathlib import Path
19
- from typing import Any, Dict, Optional, Union
20
-
21
- import numpy as np
22
- import pandas as pd
23
-
24
- from ._base import FMFile
25
-
26
-
27
- class ZZN(FMFile):
28
- """Reads and processes Flood Modeller 1D binary results format '.zzn'
29
-
30
- Args:
31
- zzn_filepath (str): Full filepath to model zzn file
32
-
33
- Output:
34
- Initiates 'ZZN' class object
35
- """
36
-
37
- _filetype: str = "ZZN"
38
- _suffix: str = ".zzn"
39
-
40
- def __init__(self, zzn_filepath: Optional[Union[str, Path]]):
41
- try:
42
- FMFile.__init__(self, zzn_filepath)
43
-
44
- # Get zzn_dll path
45
- zzn_dll = Path(Path(__file__).resolve().parent, "zzn_read.dll")
46
- # Using str() method as CDLL doesn't seem to like accepting Path object
47
- zzn_read = ct.CDLL(str(zzn_dll))
48
-
49
- # Get zzl path
50
- zzn = self._filepath
51
- zzl = zzn.with_suffix(".zzl")
52
- if not zzl.exists():
53
- raise FileNotFoundError(
54
- "Error: Could not find associated .ZZL file. Ensure that the zzn results have an associated zzl file with matching name."
55
- )
56
-
57
- self.meta: Dict[str, Any] = {} # Dict object to hold all metadata
58
- self.data = {} # Dict object to hold all data
59
-
60
- # PROCESS_ZZL
61
- self.meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)
62
- self.meta["zzn_name"] = ct.create_string_buffer(bytes(str(zzn), "utf-8"), 255)
63
- self.meta["model_title"] = ct.create_string_buffer(b"", 128)
64
- self.meta["nnodes"] = ct.c_int(0)
65
- self.meta["label_length"] = ct.c_int(0)
66
- self.meta["dt"] = ct.c_float(0.0)
67
- self.meta["timestep0"] = ct.c_int(0)
68
- self.meta["ltimestep"] = ct.c_int(0)
69
- self.meta["save_int"] = ct.c_float(0.0)
70
- self.meta["is_quality"] = ct.c_bool(False)
71
- self.meta["nvars"] = ct.c_int(0)
72
- self.meta["tzero"] = (ct.c_int * 5)()
73
- self.meta["errstat"] = ct.c_int(0)
74
- zzn_read.PROCESS_ZZL(
75
- ct.byref(self.meta["zzl_name"]),
76
- ct.byref(self.meta["model_title"]),
77
- ct.byref(self.meta["nnodes"]),
78
- ct.byref(self.meta["label_length"]),
79
- ct.byref(self.meta["dt"]),
80
- ct.byref(self.meta["timestep0"]),
81
- ct.byref(self.meta["ltimestep"]),
82
- ct.byref(self.meta["save_int"]),
83
- ct.byref(self.meta["is_quality"]),
84
- ct.byref(self.meta["nvars"]),
85
- ct.byref(self.meta["tzero"]),
86
- ct.byref(self.meta["errstat"]),
87
- )
88
- # PROCESS_LABELS
89
- self.meta["labels"] = (
90
- ct.c_char * self.meta["label_length"].value * self.meta["nnodes"].value
91
- )()
92
- zzn_read.PROCESS_LABELS(
93
- ct.byref(self.meta["zzl_name"]),
94
- ct.byref(self.meta["nnodes"]),
95
- ct.byref(self.meta["labels"]),
96
- ct.byref(self.meta["label_length"]),
97
- ct.byref(self.meta["errstat"]),
98
- )
99
- # PREPROCESS_ZZN
100
- last_hr = (
101
- (self.meta["ltimestep"].value - self.meta["timestep0"].value)
102
- * self.meta["dt"].value
103
- / 3600
104
- )
105
- self.meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
106
- self.meta["aitimestep"] = (ct.c_int * 2)(
107
- self.meta["timestep0"].value, self.meta["ltimestep"].value
108
- )
109
- self.meta["isavint"] = (ct.c_int * 2)()
110
- zzn_read.PREPROCESS_ZZN(
111
- ct.byref(self.meta["output_hrs"]),
112
- ct.byref(self.meta["aitimestep"]),
113
- ct.byref(self.meta["dt"]),
114
- ct.byref(self.meta["timestep0"]),
115
- ct.byref(self.meta["ltimestep"]),
116
- ct.byref(self.meta["save_int"]),
117
- ct.byref(self.meta["isavint"]),
118
- )
119
- # PROCESS_ZZN
120
- self.meta["node_ID"] = ct.c_int(-1)
121
- self.meta["savint_skip"] = ct.c_int(1)
122
- self.meta["savint_range"] = ct.c_int(
123
- int(
124
- (
125
- (self.meta["isavint"][1] - self.meta["isavint"][0])
126
- / self.meta["savint_skip"].value
127
- )
128
- )
129
- )
130
- nx = self.meta["nnodes"].value
131
- ny = self.meta["nvars"].value
132
- nz = self.meta["savint_range"].value + 1
133
- self.data["all_results"] = (ct.c_float * nx * ny * nz)()
134
- self.data["max_results"] = (ct.c_float * nx * ny)()
135
- self.data["min_results"] = (ct.c_float * nx * ny)()
136
- self.data["max_times"] = (ct.c_int * nx * ny)()
137
- self.data["min_times"] = (ct.c_int * nx * ny)()
138
- zzn_read.PROCESS_ZZN(
139
- ct.byref(self.meta["zzn_name"]),
140
- ct.byref(self.meta["node_ID"]),
141
- ct.byref(self.meta["nnodes"]),
142
- ct.byref(self.meta["is_quality"]),
143
- ct.byref(self.meta["nvars"]),
144
- ct.byref(self.meta["savint_range"]),
145
- ct.byref(self.meta["savint_skip"]),
146
- ct.byref(self.data["all_results"]),
147
- ct.byref(self.data["max_results"]),
148
- ct.byref(self.data["min_results"]),
149
- ct.byref(self.data["max_times"]),
150
- ct.byref(self.data["min_times"]),
151
- ct.byref(self.meta["errstat"]),
152
- ct.byref(self.meta["isavint"]),
153
- )
154
-
155
- # Convert useful metadata from C types into python types
156
-
157
- self.meta["dt"] = self.meta["dt"].value
158
- self.meta["nnodes"] = self.meta["nnodes"].value
159
- self.meta["save_int"] = self.meta["save_int"].value
160
- self.meta["nvars"] = self.meta["nvars"].value
161
- self.meta["savint_range"] = self.meta["savint_range"].value
162
-
163
- self.meta["zzn_name"] = self.meta["zzn_name"].value.decode()
164
- self.meta["labels"] = [
165
- label.value.decode().strip() for label in list(self.meta["labels"])
166
- ]
167
- self.meta["model_title"] = self.meta["model_title"].value.decode()
168
-
169
- except Exception as e:
170
- self._handle_exception(e, when="read")
171
-
172
- def to_dataframe(
173
- self,
174
- result_type: str = "all",
175
- variable: str = "all",
176
- include_time: bool = False,
177
- multilevel_header: bool = True,
178
- ) -> Union[pd.Series, pd.DataFrame]:
179
- """Loads zzn results to pandas dataframe object.
180
-
181
- Args:
182
- result_type (str, optional): {'all'} | 'max' | 'min'
183
- Define whether to return all timesteps or just max/min results. Defaults to 'all'.
184
- variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
185
- Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
186
- include_time (bool, optional):
187
- Whether to include the time of max or min results. Defaults to False.
188
- multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
189
- headers with the variable as first level and node label as second header. If False, the column
190
- names will be formatted "{node label}_{variable}". Defaults to True.
191
-
192
- Returns:
193
- pandas.DataFrame(): dataframe object of simulation results
194
- """
195
- nx = self.meta["nnodes"]
196
- ny = self.meta["nvars"]
197
- nz = self.meta["savint_range"] + 1
198
- result_type = result_type.lower()
199
-
200
- if result_type == "all":
201
- arr = np.array(self.data["all_results"])
202
- time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
203
- vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
204
- if multilevel_header:
205
- col_names = [vars_list, self.meta["labels"]]
206
- df = pd.DataFrame(
207
- arr.reshape(nz, nx * ny),
208
- index=time_index,
209
- columns=pd.MultiIndex.from_product(col_names),
210
- )
211
- df.index.name = "Time (hr)"
212
- if variable != "all":
213
- return df[variable.capitalize()]
214
-
215
- else:
216
- col_names = [f"{node}_{var}" for var in vars_list for node in self.meta["labels"]]
217
- df = pd.DataFrame(arr.reshape(nz, nx * ny), index=time_index, columns=col_names)
218
- df.index.name = "Time (hr)"
219
- if variable != "all":
220
- use_cols = [col for col in df.columns if col.endswith(variable.capitalize())]
221
- return df[use_cols]
222
- return df
223
-
224
- if result_type in ("max", "min"):
225
- arr = np.array(self.data[f"{result_type}_results"]).transpose()
226
- node_index = self.meta["labels"]
227
- col_names = [
228
- result_type.capitalize() + lbl
229
- for lbl in [
230
- " Flow",
231
- " Stage",
232
- " Froude",
233
- " Velocity",
234
- " Mode",
235
- " State",
236
- ]
237
- ]
238
- df = pd.DataFrame(arr, index=node_index, columns=col_names)
239
- df.index.name = "Node Label"
240
-
241
- if include_time:
242
- times = np.array(self.data[f"{result_type}_times"]).transpose()
243
- # transform timestep into hrs
244
- times = ((times - self.meta["timestep0"]) * self.meta["dt"]) / 3600
245
- time_col_names = [name + " Time(hrs)" for name in col_names]
246
- time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
247
- time_df.index.name = "Node Label"
248
- df = pd.concat([df, time_df], axis=1)
249
- new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
250
- df = df[new_col_order]
251
- if variable != "all":
252
- return df[
253
- [
254
- f"{result_type.capitalize()} {variable.capitalize()}",
255
- f"{result_type.capitalize()} {variable.capitalize()} Time(hrs)",
256
- ]
257
- ]
258
- return df
259
-
260
- if variable != "all":
261
- return df[f"{result_type.capitalize()} {variable.capitalize()}"]
262
- return df
263
-
264
- raise ValueError(f'Result type: "{result_type}" not recognised')
265
-
266
- def export_to_csv(
267
- self,
268
- save_location: Union[str, Path] = "default",
269
- result_type: str = "all",
270
- variable: str = "all",
271
- include_time: bool = False,
272
- ) -> None:
273
- """Exports zzn results to CSV file.
274
-
275
- Args:
276
- save_location (str, optional): {default} | folder or file path
277
- Full or relative path to folder or csv file to save output csv, if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file. Defaults to 'default'.
278
- result_type (str, optional): {all} | max | min
279
- Define whether to output all timesteps or just max/min results. Defaults to 'all'.
280
- variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
281
- Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
282
- include_time (bool, optional):
283
- Whether to include the time of max or min results. Defaults to False.
284
-
285
- Raises:
286
- Exception: Raised if result_type set to invalid option
287
- """
288
- if save_location == "default":
289
- save_location = Path(self.meta["zzn_name"]).with_suffix(".csv")
290
- else:
291
- save_location = Path(save_location)
292
- if not save_location.is_absolute():
293
- # for if relative folder path given
294
- save_location = Path(Path(self.meta["zzn_name"]).parent, save_location)
295
-
296
- if save_location.suffix != ".csv": # Assumed to be pointing to a folder
297
- # Check if the folder exists, if not create it
298
- if not save_location.exists():
299
- Path.mkdir(save_location)
300
- save_location = Path(
301
- save_location, Path(self.meta["zzn_name"]).with_suffix(".csv").name
302
- )
303
-
304
- else:
305
- if not save_location.parent.exists():
306
- Path.mkdir(save_location.parent)
307
-
308
- result_type = result_type.lower()
309
-
310
- if result_type.lower() not in ["all", "max", "min"]:
311
- raise Exception(
312
- f" '{result_type}' is not a valid result type. Valid arguments are: 'all', 'max' or 'min' "
313
- )
314
-
315
- df = self.to_dataframe(
316
- result_type=result_type, variable=variable, include_time=include_time
317
- )
318
- df.to_csv(save_location)
319
- print(f"CSV saved to {save_location}")
320
-
321
- def to_dict_of_dataframes(self, variable: str = "all") -> dict:
322
- """Loads zzn results to a dictionary of pandas dataframe objects.
323
-
324
- Args:
325
- variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
326
- Specify a single output variable (e.g 'flow' or 'stage') or any combination passed as comma separated
327
- variable names. Defaults to 'all'.
328
-
329
- Returns:
330
- dict: dictionary of dataframe object of simulation results, keys corresponding to variables.
331
- """
332
- nx = self.meta["nnodes"]
333
- ny = self.meta["nvars"]
334
- nz = self.meta["savint_range"] + 1
335
- output = {}
336
-
337
- arr = np.array(self.data["all_results"])
338
- time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
339
-
340
- vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
341
-
342
- col_names = self.meta["labels"]
343
- temp_arr = np.reshape(arr, (nz, ny, nx))
344
-
345
- for i, var in enumerate(vars_list):
346
- output[var] = pd.DataFrame(temp_arr[:, i, :], index=time_index, columns=col_names)
347
- output[var].index.name = "Time (hr)"
348
-
349
- output["Time (hr)"] = time_index
350
-
351
- if variable != "all":
352
- input_vars = variable.split(",")
353
- for i, var in enumerate(input_vars):
354
- input_vars[i] = var.strip().capitalize()
355
- if input_vars[i] not in vars_list:
356
- raise Exception(
357
- f" '{input_vars[i]}' is not a valid variable name. Valid arguments are: {vars_list} "
358
- )
359
-
360
- for var in vars_list:
361
- if var not in input_vars:
362
- del output[var]
363
- return output
1
+ """
2
+ Flood Modeller Python API
3
+ Copyright (C) 2024 Jacobs U.K. Limited
4
+
5
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
+
8
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
+
11
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
+
13
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import ctypes as ct
20
+ from pathlib import Path
21
+ from typing import Any
22
+
23
+ import numpy as np
24
+ import pandas as pd
25
+
26
+ from ._base import FMFile
27
+ from .util import is_windows
28
+
29
+
30
+ class ZZN(FMFile):
31
+ """Reads and processes Flood Modeller 1D binary results format '.zzn'
32
+
33
+ Args:
34
+ zzn_filepath (str): Full filepath to model zzn file
35
+
36
+ Output:
37
+ Initiates 'ZZN' class object
38
+ """
39
+
40
+ _filetype: str = "ZZN"
41
+ _suffix: str = ".zzn"
42
+
43
+ def __init__( # noqa: PLR0915
44
+ self,
45
+ zzn_filepath: str | Path | None = None,
46
+ from_json: bool = False,
47
+ ):
48
+ try:
49
+ if from_json:
50
+ return
51
+ FMFile.__init__(self, zzn_filepath)
52
+
53
+ # Get zzn_dll path
54
+ lib = "zzn_read.dll" if is_windows() else "libzzn_read.so"
55
+ zzn_dll = Path(__file__).resolve().parent / "libs" / lib
56
+
57
+ # Catch LD_LIBRARY_PATH error for linux
58
+ try:
59
+ zzn_read = ct.CDLL(str(zzn_dll))
60
+ except OSError as e:
61
+ msg_1 = "libifport.so.5: cannot open shared object file: No such file or directory"
62
+ if msg_1 in str(e):
63
+ msg_2 = "Set LD_LIBRARY_PATH environment variable to be floodmodeller_api/lib"
64
+ raise OSError(msg_2) from e
65
+ raise
66
+
67
+ # Get zzl path
68
+ zzn = self._filepath
69
+ zzl = zzn.with_suffix(".zzl")
70
+ if not zzl.exists():
71
+ raise FileNotFoundError(
72
+ "Error: Could not find associated .ZZL file. Ensure that the zzn results have an associated zzl file with matching name.",
73
+ )
74
+
75
+ self.meta: dict[str, Any] = {} # Dict object to hold all metadata
76
+ self.data = {} # Dict object to hold all data
77
+
78
+ # PROCESS_ZZL
79
+ self.meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)
80
+ self.meta["zzn_name"] = ct.create_string_buffer(bytes(str(zzn), "utf-8"), 255)
81
+ self.meta["model_title"] = ct.create_string_buffer(b"", 128)
82
+ self.meta["nnodes"] = ct.c_int(0)
83
+ self.meta["label_length"] = ct.c_int(0)
84
+ self.meta["dt"] = ct.c_float(0.0)
85
+ self.meta["timestep0"] = ct.c_int(0)
86
+ self.meta["ltimestep"] = ct.c_int(0)
87
+ self.meta["save_int"] = ct.c_float(0.0)
88
+ self.meta["is_quality"] = ct.c_bool(False)
89
+ self.meta["nvars"] = ct.c_int(0)
90
+ self.meta["tzero"] = (ct.c_int * 5)()
91
+ self.meta["errstat"] = ct.c_int(0)
92
+ zzn_read.process_zzl(
93
+ ct.byref(self.meta["zzl_name"]),
94
+ ct.byref(self.meta["model_title"]),
95
+ ct.byref(self.meta["nnodes"]),
96
+ ct.byref(self.meta["label_length"]),
97
+ ct.byref(self.meta["dt"]),
98
+ ct.byref(self.meta["timestep0"]),
99
+ ct.byref(self.meta["ltimestep"]),
100
+ ct.byref(self.meta["save_int"]),
101
+ ct.byref(self.meta["is_quality"]),
102
+ ct.byref(self.meta["nvars"]),
103
+ ct.byref(self.meta["tzero"]),
104
+ ct.byref(self.meta["errstat"]),
105
+ )
106
+ # PROCESS_LABELS
107
+ self.meta["labels"] = (
108
+ ct.c_char * self.meta["label_length"].value * self.meta["nnodes"].value
109
+ )()
110
+ zzn_read.process_labels(
111
+ ct.byref(self.meta["zzl_name"]),
112
+ ct.byref(self.meta["nnodes"]),
113
+ ct.byref(self.meta["label_length"]),
114
+ ct.byref(self.meta["errstat"]),
115
+ )
116
+ for i in range(self.meta["nnodes"].value):
117
+ zzn_read.get_zz_label(
118
+ ct.byref(ct.c_int(i + 1)),
119
+ ct.byref(self.meta["labels"][i]),
120
+ ct.byref(self.meta["errstat"]),
121
+ )
122
+ # PREPROCESS_ZZN
123
+ last_hr = (
124
+ (self.meta["ltimestep"].value - self.meta["timestep0"].value)
125
+ * self.meta["dt"].value
126
+ / 3600
127
+ )
128
+ self.meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
129
+ self.meta["aitimestep"] = (ct.c_int * 2)(
130
+ self.meta["timestep0"].value,
131
+ self.meta["ltimestep"].value,
132
+ )
133
+ self.meta["isavint"] = (ct.c_int * 2)()
134
+ zzn_read.preprocess_zzn(
135
+ ct.byref(self.meta["output_hrs"]),
136
+ ct.byref(self.meta["aitimestep"]),
137
+ ct.byref(self.meta["dt"]),
138
+ ct.byref(self.meta["timestep0"]),
139
+ ct.byref(self.meta["ltimestep"]),
140
+ ct.byref(self.meta["save_int"]),
141
+ ct.byref(self.meta["isavint"]),
142
+ )
143
+ # PROCESS_ZZN
144
+ self.meta["node_ID"] = ct.c_int(-1)
145
+ self.meta["savint_skip"] = ct.c_int(1)
146
+ self.meta["savint_range"] = ct.c_int(
147
+ int(
148
+ (self.meta["isavint"][1] - self.meta["isavint"][0])
149
+ / self.meta["savint_skip"].value,
150
+ ),
151
+ )
152
+ nx = self.meta["nnodes"].value
153
+ ny = self.meta["nvars"].value
154
+ nz = self.meta["savint_range"].value + 1
155
+ self.data["all_results"] = (ct.c_float * nx * ny * nz)()
156
+ self.data["max_results"] = (ct.c_float * nx * ny)()
157
+ self.data["min_results"] = (ct.c_float * nx * ny)()
158
+ self.data["max_times"] = (ct.c_int * nx * ny)()
159
+ self.data["min_times"] = (ct.c_int * nx * ny)()
160
+ zzn_read.process_zzn(
161
+ ct.byref(self.meta["zzn_name"]),
162
+ ct.byref(self.meta["node_ID"]),
163
+ ct.byref(self.meta["nnodes"]),
164
+ ct.byref(self.meta["is_quality"]),
165
+ ct.byref(self.meta["nvars"]),
166
+ ct.byref(self.meta["savint_range"]),
167
+ ct.byref(self.meta["savint_skip"]),
168
+ ct.byref(self.data["all_results"]),
169
+ ct.byref(self.data["max_results"]),
170
+ ct.byref(self.data["min_results"]),
171
+ ct.byref(self.data["max_times"]),
172
+ ct.byref(self.data["min_times"]),
173
+ ct.byref(self.meta["errstat"]),
174
+ ct.byref(self.meta["isavint"]),
175
+ )
176
+
177
+ # Convert useful metadata from C types into python types
178
+
179
+ self.meta["dt"] = self.meta["dt"].value
180
+ self.meta["nnodes"] = self.meta["nnodes"].value
181
+ self.meta["save_int"] = self.meta["save_int"].value
182
+ self.meta["nvars"] = self.meta["nvars"].value
183
+ self.meta["savint_range"] = self.meta["savint_range"].value
184
+
185
+ self.meta["zzn_name"] = self.meta["zzn_name"].value.decode()
186
+ self.meta["labels"] = [
187
+ label.value.decode().strip() for label in list(self.meta["labels"])
188
+ ]
189
+ self.meta["model_title"] = self.meta["model_title"].value.decode()
190
+
191
+ except Exception as e:
192
+ self._handle_exception(e, when="read")
193
+
194
+ def to_dataframe( # noqa: PLR0911
195
+ self,
196
+ result_type: str = "all",
197
+ variable: str = "all",
198
+ include_time: bool = False,
199
+ multilevel_header: bool = True,
200
+ ) -> pd.Series | pd.DataFrame:
201
+ """Loads zzn results to pandas dataframe object.
202
+
203
+ Args:
204
+ result_type (str, optional): {'all'} | 'max' | 'min'
205
+ Define whether to return all timesteps or just max/min results. Defaults to 'all'.
206
+ variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
207
+ Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
208
+ include_time (bool, optional):
209
+ Whether to include the time of max or min results. Defaults to False.
210
+ multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
211
+ headers with the variable as first level and node label as second header. If False, the column
212
+ names will be formatted "{node label}_{variable}". Defaults to True.
213
+
214
+ Returns:
215
+ pandas.DataFrame(): dataframe object of simulation results
216
+ """
217
+ nx = self.meta["nnodes"]
218
+ ny = self.meta["nvars"]
219
+ nz = self.meta["savint_range"] + 1
220
+ result_type = result_type.lower()
221
+
222
+ if result_type == "all":
223
+ arr = np.array(self.data["all_results"])
224
+ time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
225
+ vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
226
+ if multilevel_header:
227
+ col_names = [vars_list, self.meta["labels"]]
228
+ df = pd.DataFrame(
229
+ arr.reshape(nz, nx * ny),
230
+ index=time_index,
231
+ columns=pd.MultiIndex.from_product(col_names),
232
+ )
233
+ df.index.name = "Time (hr)"
234
+ if variable != "all":
235
+ return df[variable.capitalize()]
236
+
237
+ else:
238
+ col_names = [f"{node}_{var}" for var in vars_list for node in self.meta["labels"]]
239
+ df = pd.DataFrame(arr.reshape(nz, nx * ny), index=time_index, columns=col_names)
240
+ df.index.name = "Time (hr)"
241
+ if variable != "all":
242
+ use_cols = [col for col in df.columns if col.endswith(variable.capitalize())]
243
+ return df[use_cols]
244
+ return df
245
+
246
+ if result_type in ("max", "min"):
247
+ arr = np.array(self.data[f"{result_type}_results"]).transpose()
248
+ node_index = self.meta["labels"]
249
+ col_names = [
250
+ result_type.capitalize() + lbl
251
+ for lbl in [
252
+ " Flow",
253
+ " Stage",
254
+ " Froude",
255
+ " Velocity",
256
+ " Mode",
257
+ " State",
258
+ ]
259
+ ]
260
+ df = pd.DataFrame(arr, index=node_index, columns=col_names)
261
+ df.index.name = "Node Label"
262
+
263
+ if include_time:
264
+ times = np.array(self.data[f"{result_type}_times"]).transpose()
265
+ # transform timestep into hrs
266
+ times = ((times - self.meta["timestep0"]) * self.meta["dt"]) / 3600
267
+ time_col_names = [name + " Time(hrs)" for name in col_names]
268
+ time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
269
+ time_df.index.name = "Node Label"
270
+ df = pd.concat([df, time_df], axis=1)
271
+ new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
272
+ df = df[new_col_order]
273
+ if variable != "all":
274
+ return df[
275
+ [
276
+ f"{result_type.capitalize()} {variable.capitalize()}",
277
+ f"{result_type.capitalize()} {variable.capitalize()} Time(hrs)",
278
+ ]
279
+ ]
280
+ return df
281
+
282
+ if variable != "all":
283
+ return df[f"{result_type.capitalize()} {variable.capitalize()}"]
284
+ return df
285
+
286
+ raise ValueError(f'Result type: "{result_type}" not recognised')
287
+
288
+ def export_to_csv(
289
+ self,
290
+ save_location: str | Path = "default",
291
+ result_type: str = "all",
292
+ variable: str = "all",
293
+ include_time: bool = False,
294
+ ) -> None:
295
+ """Exports zzn results to CSV file.
296
+
297
+ Args:
298
+ save_location (str, optional): {default} | folder or file path
299
+ Full or relative path to folder or csv file to save output csv, if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file. Defaults to 'default'.
300
+ result_type (str, optional): {all} | max | min
301
+ Define whether to output all timesteps or just max/min results. Defaults to 'all'.
302
+ variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
303
+ Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
304
+ include_time (bool, optional):
305
+ Whether to include the time of max or min results. Defaults to False.
306
+
307
+ Raises:
308
+ Exception: Raised if result_type set to invalid option
309
+ """
310
+ if save_location == "default":
311
+ save_location = Path(self.meta["zzn_name"]).with_suffix(".csv")
312
+ else:
313
+ save_location = Path(save_location)
314
+ if not save_location.is_absolute():
315
+ # for if relative folder path given
316
+ save_location = Path(Path(self.meta["zzn_name"]).parent, save_location)
317
+
318
+ if save_location.suffix != ".csv": # Assumed to be pointing to a folder
319
+ # Check if the folder exists, if not create it
320
+ if not save_location.exists():
321
+ Path.mkdir(save_location)
322
+ save_location = Path(
323
+ save_location,
324
+ Path(self.meta["zzn_name"]).with_suffix(".csv").name,
325
+ )
326
+
327
+ elif not save_location.parent.exists():
328
+ Path.mkdir(save_location.parent)
329
+
330
+ result_type = result_type.lower()
331
+
332
+ if result_type.lower() not in ["all", "max", "min"]:
333
+ raise Exception(
334
+ f" '{result_type}' is not a valid result type. Valid arguments are: 'all', 'max' or 'min' ",
335
+ )
336
+
337
+ df = self.to_dataframe(
338
+ result_type=result_type,
339
+ variable=variable,
340
+ include_time=include_time,
341
+ )
342
+ df.to_csv(save_location)
343
+ print(f"CSV saved to {save_location}")
344
+
345
def to_dict_of_dataframes(self, variable: str = "all") -> dict:
    """Loads zzn results to a dictionary of pandas dataframe objects.

    Args:
        variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
            Specify a single output variable (e.g 'flow' or 'stage') or any combination passed as comma separated
            variable names. Defaults to 'all'.

    Returns:
        dict: dictionary of dataframe object of simulation results, keys corresponding to variables,
        plus a "Time (hr)" entry holding the shared time index array.

    Raises:
        ValueError: If any requested variable name is not recognised.
    """
    vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]

    # Validate the requested variables up front, before any array work
    # (previously all six dataframes were built before validation, and a
    # generic Exception was raised instead of ValueError).
    if variable == "all":
        requested = vars_list
    else:
        requested = [var.strip().capitalize() for var in variable.split(",")]
        for var in requested:
            if var not in vars_list:
                raise ValueError(
                    f" '{var}' is not a valid variable name. Valid arguments are: {vars_list} ",
                )

    nx = self.meta["nnodes"]
    ny = self.meta["nvars"]
    nz = self.meta["savint_range"] + 1

    # Raw results are stored as (timestep, variable, node)
    arr = np.reshape(np.array(self.data["all_results"]), (nz, ny, nx))
    time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
    col_names = self.meta["labels"]

    output = {}
    for i, var in enumerate(vars_list):
        if var not in requested:
            continue
        output[var] = pd.DataFrame(arr[:, i, :], index=time_index, columns=col_names)
        output[var].index.name = "Time (hr)"

    output["Time (hr)"] = time_index
    return output