floodmodeller-api 0.4.2__py3-none-any.whl → 0.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178) hide show
  1. floodmodeller_api/__init__.py +8 -9
  2. floodmodeller_api/_base.py +184 -176
  3. floodmodeller_api/backup.py +273 -273
  4. floodmodeller_api/dat.py +909 -838
  5. floodmodeller_api/diff.py +136 -119
  6. floodmodeller_api/ied.py +307 -311
  7. floodmodeller_api/ief.py +647 -646
  8. floodmodeller_api/ief_flags.py +253 -253
  9. floodmodeller_api/inp.py +266 -268
  10. floodmodeller_api/libs/libifcoremd.dll +0 -0
  11. floodmodeller_api/libs/libifcoremt.so.5 +0 -0
  12. floodmodeller_api/libs/libifport.so.5 +0 -0
  13. floodmodeller_api/{libmmd.dll → libs/libimf.so} +0 -0
  14. floodmodeller_api/libs/libintlc.so.5 +0 -0
  15. floodmodeller_api/libs/libmmd.dll +0 -0
  16. floodmodeller_api/libs/libsvml.so +0 -0
  17. floodmodeller_api/libs/libzzn_read.so +0 -0
  18. floodmodeller_api/libs/zzn_read.dll +0 -0
  19. floodmodeller_api/logs/__init__.py +2 -2
  20. floodmodeller_api/logs/lf.py +320 -314
  21. floodmodeller_api/logs/lf_helpers.py +354 -346
  22. floodmodeller_api/logs/lf_params.py +643 -529
  23. floodmodeller_api/mapping.py +84 -0
  24. floodmodeller_api/test/__init__.py +4 -4
  25. floodmodeller_api/test/conftest.py +9 -8
  26. floodmodeller_api/test/test_backup.py +117 -117
  27. floodmodeller_api/test/test_dat.py +221 -92
  28. floodmodeller_api/test/test_data/All Units 4_6.DAT +1081 -1081
  29. floodmodeller_api/test/test_data/All Units 4_6.feb +1081 -1081
  30. floodmodeller_api/test/test_data/BRIDGE.DAT +926 -926
  31. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.dat +36 -36
  32. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.feb +36 -36
  33. floodmodeller_api/test/test_data/DamBreakADI.xml +52 -52
  34. floodmodeller_api/test/test_data/DamBreakFAST.xml +58 -58
  35. floodmodeller_api/test/test_data/DamBreakFAST_dy.xml +53 -53
  36. floodmodeller_api/test/test_data/DamBreakTVD.xml +55 -55
  37. floodmodeller_api/test/test_data/DefenceBreach.xml +53 -53
  38. floodmodeller_api/test/test_data/DefenceBreachFAST.xml +60 -60
  39. floodmodeller_api/test/test_data/DefenceBreachFAST_dy.xml +55 -55
  40. floodmodeller_api/test/test_data/Domain1+2_QH.xml +76 -76
  41. floodmodeller_api/test/test_data/Domain1_H.xml +41 -41
  42. floodmodeller_api/test/test_data/Domain1_Q.xml +41 -41
  43. floodmodeller_api/test/test_data/Domain1_Q_FAST.xml +48 -48
  44. floodmodeller_api/test/test_data/Domain1_Q_FAST_dy.xml +48 -48
  45. floodmodeller_api/test/test_data/Domain1_Q_xml_expected.json +263 -0
  46. floodmodeller_api/test/test_data/Domain1_W.xml +41 -41
  47. floodmodeller_api/test/test_data/EX1.DAT +321 -321
  48. floodmodeller_api/test/test_data/EX1.ext +107 -107
  49. floodmodeller_api/test/test_data/EX1.feb +320 -320
  50. floodmodeller_api/test/test_data/EX1.gxy +107 -107
  51. floodmodeller_api/test/test_data/EX17.DAT +421 -422
  52. floodmodeller_api/test/test_data/EX17.ext +213 -213
  53. floodmodeller_api/test/test_data/EX17.feb +422 -422
  54. floodmodeller_api/test/test_data/EX18.DAT +375 -375
  55. floodmodeller_api/test/test_data/EX18_DAT_expected.json +3876 -0
  56. floodmodeller_api/test/test_data/EX2.DAT +302 -302
  57. floodmodeller_api/test/test_data/EX3.DAT +926 -926
  58. floodmodeller_api/test/test_data/EX3_DAT_expected.json +16235 -0
  59. floodmodeller_api/test/test_data/EX3_IEF_expected.json +61 -0
  60. floodmodeller_api/test/test_data/EX6.DAT +2084 -2084
  61. floodmodeller_api/test/test_data/EX6.ext +532 -532
  62. floodmodeller_api/test/test_data/EX6.feb +2084 -2084
  63. floodmodeller_api/test/test_data/EX6_DAT_expected.json +31647 -0
  64. floodmodeller_api/test/test_data/Event Data Example.DAT +336 -336
  65. floodmodeller_api/test/test_data/Event Data Example.ext +107 -107
  66. floodmodeller_api/test/test_data/Event Data Example.feb +336 -336
  67. floodmodeller_api/test/test_data/Linked1D2D.xml +52 -52
  68. floodmodeller_api/test/test_data/Linked1D2DFAST.xml +53 -53
  69. floodmodeller_api/test/test_data/Linked1D2DFAST_dy.xml +48 -48
  70. floodmodeller_api/test/test_data/Linked1D2D_xml_expected.json +313 -0
  71. floodmodeller_api/test/test_data/blockage.dat +50 -50
  72. floodmodeller_api/test/test_data/blockage.ext +45 -45
  73. floodmodeller_api/test/test_data/blockage.feb +9 -9
  74. floodmodeller_api/test/test_data/blockage.gxy +71 -71
  75. floodmodeller_api/test/test_data/defaultUnits.dat +127 -127
  76. floodmodeller_api/test/test_data/defaultUnits.ext +45 -45
  77. floodmodeller_api/test/test_data/defaultUnits.feb +9 -9
  78. floodmodeller_api/test/test_data/defaultUnits.fmpx +58 -58
  79. floodmodeller_api/test/test_data/defaultUnits.gxy +85 -85
  80. floodmodeller_api/test/test_data/ex3.ief +20 -20
  81. floodmodeller_api/test/test_data/ex3.lf1 +2800 -2800
  82. floodmodeller_api/test/test_data/ex4.DAT +1374 -1374
  83. floodmodeller_api/test/test_data/ex4_changed.DAT +1374 -1374
  84. floodmodeller_api/test/test_data/example1.inp +329 -329
  85. floodmodeller_api/test/test_data/example2.inp +158 -158
  86. floodmodeller_api/test/test_data/example3.inp +297 -297
  87. floodmodeller_api/test/test_data/example4.inp +388 -388
  88. floodmodeller_api/test/test_data/example5.inp +147 -147
  89. floodmodeller_api/test/test_data/example6.inp +154 -154
  90. floodmodeller_api/test/test_data/jump.dat +176 -176
  91. floodmodeller_api/test/test_data/network.dat +1374 -1374
  92. floodmodeller_api/test/test_data/network.ext +45 -45
  93. floodmodeller_api/test/test_data/network.exy +1 -1
  94. floodmodeller_api/test/test_data/network.feb +45 -45
  95. floodmodeller_api/test/test_data/network.ied +45 -45
  96. floodmodeller_api/test/test_data/network.ief +20 -20
  97. floodmodeller_api/test/test_data/network.inp +147 -147
  98. floodmodeller_api/test/test_data/network.pxy +57 -57
  99. floodmodeller_api/test/test_data/network.zzd +122 -122
  100. floodmodeller_api/test/test_data/network_dat_expected.json +21837 -0
  101. floodmodeller_api/test/test_data/network_from_tabularCSV.csv +87 -87
  102. floodmodeller_api/test/test_data/network_ied_expected.json +287 -0
  103. floodmodeller_api/test/test_data/rnweir.dat +9 -9
  104. floodmodeller_api/test/test_data/rnweir.ext +45 -45
  105. floodmodeller_api/test/test_data/rnweir.feb +9 -9
  106. floodmodeller_api/test/test_data/rnweir.gxy +45 -45
  107. floodmodeller_api/test/test_data/rnweir_default.dat +74 -74
  108. floodmodeller_api/test/test_data/rnweir_default.ext +45 -45
  109. floodmodeller_api/test/test_data/rnweir_default.feb +9 -9
  110. floodmodeller_api/test/test_data/rnweir_default.fmpx +58 -58
  111. floodmodeller_api/test/test_data/rnweir_default.gxy +53 -53
  112. floodmodeller_api/test/test_data/unit checks.dat +16 -16
  113. floodmodeller_api/test/test_ied.py +29 -29
  114. floodmodeller_api/test/test_ief.py +125 -24
  115. floodmodeller_api/test/test_inp.py +47 -48
  116. floodmodeller_api/test/test_json.py +114 -0
  117. floodmodeller_api/test/test_logs_lf.py +48 -51
  118. floodmodeller_api/test/test_tool.py +165 -154
  119. floodmodeller_api/test/test_toolbox_structure_log.py +234 -239
  120. floodmodeller_api/test/test_xml2d.py +151 -156
  121. floodmodeller_api/test/test_zzn.py +36 -34
  122. floodmodeller_api/to_from_json.py +218 -0
  123. floodmodeller_api/tool.py +332 -330
  124. floodmodeller_api/toolbox/__init__.py +5 -5
  125. floodmodeller_api/toolbox/example_tool.py +45 -45
  126. floodmodeller_api/toolbox/model_build/__init__.py +2 -2
  127. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +100 -94
  128. floodmodeller_api/toolbox/model_build/structure_log/__init__.py +1 -1
  129. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +287 -289
  130. floodmodeller_api/toolbox/model_build/structure_log_definition.py +76 -72
  131. floodmodeller_api/units/__init__.py +10 -10
  132. floodmodeller_api/units/_base.py +214 -209
  133. floodmodeller_api/units/boundaries.py +467 -469
  134. floodmodeller_api/units/comment.py +52 -55
  135. floodmodeller_api/units/conduits.py +382 -403
  136. floodmodeller_api/units/helpers.py +123 -132
  137. floodmodeller_api/units/iic.py +107 -101
  138. floodmodeller_api/units/losses.py +305 -308
  139. floodmodeller_api/units/sections.py +444 -445
  140. floodmodeller_api/units/structures.py +1690 -1684
  141. floodmodeller_api/units/units.py +93 -102
  142. floodmodeller_api/units/unsupported.py +44 -44
  143. floodmodeller_api/units/variables.py +87 -89
  144. floodmodeller_api/urban1d/__init__.py +11 -11
  145. floodmodeller_api/urban1d/_base.py +188 -177
  146. floodmodeller_api/urban1d/conduits.py +93 -85
  147. floodmodeller_api/urban1d/general_parameters.py +58 -58
  148. floodmodeller_api/urban1d/junctions.py +81 -79
  149. floodmodeller_api/urban1d/losses.py +81 -74
  150. floodmodeller_api/urban1d/outfalls.py +114 -107
  151. floodmodeller_api/urban1d/raingauges.py +111 -108
  152. floodmodeller_api/urban1d/subsections.py +92 -93
  153. floodmodeller_api/urban1d/xsections.py +147 -141
  154. floodmodeller_api/util.py +77 -21
  155. floodmodeller_api/validation/parameters.py +660 -660
  156. floodmodeller_api/validation/urban_parameters.py +388 -404
  157. floodmodeller_api/validation/validation.py +110 -112
  158. floodmodeller_api/version.py +1 -1
  159. floodmodeller_api/xml2d.py +688 -684
  160. floodmodeller_api/xml2d_template.py +37 -37
  161. floodmodeller_api/zzn.py +387 -365
  162. {floodmodeller_api-0.4.2.dist-info → floodmodeller_api-0.4.3.dist-info}/LICENSE.txt +13 -13
  163. {floodmodeller_api-0.4.2.dist-info → floodmodeller_api-0.4.3.dist-info}/METADATA +82 -82
  164. floodmodeller_api-0.4.3.dist-info/RECORD +179 -0
  165. {floodmodeller_api-0.4.2.dist-info → floodmodeller_api-0.4.3.dist-info}/WHEEL +1 -1
  166. floodmodeller_api-0.4.3.dist-info/entry_points.txt +3 -0
  167. floodmodeller_api/libifcoremd.dll +0 -0
  168. floodmodeller_api/test/test_data/EX3.bmp +0 -0
  169. floodmodeller_api/test/test_data/test_output.csv +0 -87
  170. floodmodeller_api/zzn_read.dll +0 -0
  171. floodmodeller_api-0.4.2.data/scripts/fmapi-add_siltation.bat +0 -2
  172. floodmodeller_api-0.4.2.data/scripts/fmapi-add_siltation.py +0 -3
  173. floodmodeller_api-0.4.2.data/scripts/fmapi-structure_log.bat +0 -2
  174. floodmodeller_api-0.4.2.data/scripts/fmapi-structure_log.py +0 -3
  175. floodmodeller_api-0.4.2.data/scripts/fmapi-toolbox.bat +0 -2
  176. floodmodeller_api-0.4.2.data/scripts/fmapi-toolbox.py +0 -41
  177. floodmodeller_api-0.4.2.dist-info/RECORD +0 -169
  178. {floodmodeller_api-0.4.2.dist-info → floodmodeller_api-0.4.3.dist-info}/top_level.txt +0 -0
floodmodeller_api/zzn.py CHANGED
@@ -1,365 +1,387 @@
1
- """
2
- Flood Modeller Python API
3
- Copyright (C) 2023 Jacobs U.K. Limited
4
-
5
- This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
- as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
-
8
- This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
-
11
- You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
-
13
- If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
- address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
- """
16
-
17
- import ctypes as ct
18
- from pathlib import Path
19
- from typing import Optional, Union
20
-
21
- import numpy as np
22
- import pandas as pd
23
-
24
- from ._base import FMFile
25
-
26
-
27
- class ZZN(FMFile):
28
- """Reads and processes Flood Modeller 1D binary results format '.zzn'
29
-
30
- Args:
31
- zzn_filepath (str): Full filepath to model zzn file
32
-
33
- Output:
34
- Initiates 'ZZN' class object
35
- """
36
-
37
- _filetype: str = "ZZN"
38
- _suffix: str = ".zzn"
39
-
40
- def __init__(self, zzn_filepath: Optional[Union[str, Path]]):
41
- try:
42
- self._filepath = zzn_filepath
43
- FMFile.__init__(self)
44
-
45
- # Get zzn_dll path
46
- zzn_dll = Path(Path(__file__).resolve().parent, "zzn_read.dll")
47
- # Using str() method as CDLL doesn't seem to like accepting Path object
48
- zzn_read = ct.CDLL(str(zzn_dll))
49
-
50
- # Get zzl path
51
- zzn = self._filepath
52
- zzl = zzn.with_suffix(".zzl")
53
- if not zzl.exists():
54
- raise FileNotFoundError(
55
- "Error: Could not find associated .ZZL file. Ensure that the zzn results have an associated zzl file with matching name."
56
- )
57
-
58
- self.meta = {} # Dict object to hold all metadata
59
- self.data = {} # Dict object to hold all data
60
-
61
- # PROCESS_ZZL
62
- self.meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)
63
- self.meta["zzn_name"] = ct.create_string_buffer(bytes(str(zzn), "utf-8"), 255)
64
- self.meta["model_title"] = ct.create_string_buffer(b"", 128)
65
- self.meta["nnodes"] = ct.c_int(0)
66
- self.meta["label_length"] = ct.c_int(0)
67
- self.meta["dt"] = ct.c_float(0.0)
68
- self.meta["timestep0"] = ct.c_int(0)
69
- self.meta["ltimestep"] = ct.c_int(0)
70
- self.meta["save_int"] = ct.c_float(0.0)
71
- self.meta["is_quality"] = ct.c_bool(False)
72
- self.meta["nvars"] = ct.c_int(0)
73
- self.meta["tzero"] = (ct.c_int * 5)()
74
- self.meta["errstat"] = ct.c_int(0)
75
- zzn_read.PROCESS_ZZL(
76
- ct.byref(self.meta["zzl_name"]),
77
- ct.byref(self.meta["model_title"]),
78
- ct.byref(self.meta["nnodes"]),
79
- ct.byref(self.meta["label_length"]),
80
- ct.byref(self.meta["dt"]),
81
- ct.byref(self.meta["timestep0"]),
82
- ct.byref(self.meta["ltimestep"]),
83
- ct.byref(self.meta["save_int"]),
84
- ct.byref(self.meta["is_quality"]),
85
- ct.byref(self.meta["nvars"]),
86
- ct.byref(self.meta["tzero"]),
87
- ct.byref(self.meta["errstat"]),
88
- )
89
- # PROCESS_LABELS
90
- self.meta["labels"] = (
91
- ct.c_char * self.meta["label_length"].value * self.meta["nnodes"].value
92
- )()
93
- zzn_read.PROCESS_LABELS(
94
- ct.byref(self.meta["zzl_name"]),
95
- ct.byref(self.meta["nnodes"]),
96
- ct.byref(self.meta["labels"]),
97
- ct.byref(self.meta["label_length"]),
98
- ct.byref(self.meta["errstat"]),
99
- )
100
- # PREPROCESS_ZZN
101
- last_hr = (
102
- (self.meta["ltimestep"].value - self.meta["timestep0"].value)
103
- * self.meta["dt"].value
104
- / 3600
105
- )
106
- self.meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
107
- self.meta["aitimestep"] = (ct.c_int * 2)(
108
- self.meta["timestep0"].value, self.meta["ltimestep"].value
109
- )
110
- self.meta["isavint"] = (ct.c_int * 2)()
111
- zzn_read.PREPROCESS_ZZN(
112
- ct.byref(self.meta["output_hrs"]),
113
- ct.byref(self.meta["aitimestep"]),
114
- ct.byref(self.meta["dt"]),
115
- ct.byref(self.meta["timestep0"]),
116
- ct.byref(self.meta["ltimestep"]),
117
- ct.byref(self.meta["save_int"]),
118
- ct.byref(self.meta["isavint"]),
119
- )
120
- # PROCESS_ZZN
121
- self.meta["node_ID"] = ct.c_int(-1)
122
- self.meta["savint_skip"] = ct.c_int(1)
123
- self.meta["savint_range"] = ct.c_int(
124
- int(
125
- (
126
- (self.meta["isavint"][1] - self.meta["isavint"][0])
127
- / self.meta["savint_skip"].value
128
- )
129
- )
130
- )
131
- nx = self.meta["nnodes"].value
132
- ny = self.meta["nvars"].value
133
- nz = self.meta["savint_range"].value + 1
134
- self.data["all_results"] = (ct.c_float * nx * ny * nz)()
135
- self.data["max_results"] = (ct.c_float * nx * ny)()
136
- self.data["min_results"] = (ct.c_float * nx * ny)()
137
- self.data["max_times"] = (ct.c_int * nx * ny)()
138
- self.data["min_times"] = (ct.c_int * nx * ny)()
139
- zzn_read.PROCESS_ZZN(
140
- ct.byref(self.meta["zzn_name"]),
141
- ct.byref(self.meta["node_ID"]),
142
- ct.byref(self.meta["nnodes"]),
143
- ct.byref(self.meta["is_quality"]),
144
- ct.byref(self.meta["nvars"]),
145
- ct.byref(self.meta["savint_range"]),
146
- ct.byref(self.meta["savint_skip"]),
147
- ct.byref(self.data["all_results"]),
148
- ct.byref(self.data["max_results"]),
149
- ct.byref(self.data["min_results"]),
150
- ct.byref(self.data["max_times"]),
151
- ct.byref(self.data["min_times"]),
152
- ct.byref(self.meta["errstat"]),
153
- ct.byref(self.meta["isavint"]),
154
- )
155
-
156
- # Convert useful metadata from C types into python types
157
-
158
- self.meta["dt"] = self.meta["dt"].value
159
- self.meta["nnodes"] = self.meta["nnodes"].value
160
- self.meta["save_int"] = self.meta["save_int"].value
161
- self.meta["nvars"] = self.meta["nvars"].value
162
- self.meta["savint_range"] = self.meta["savint_range"].value
163
-
164
- self.meta["zzn_name"] = self.meta["zzn_name"].value.decode()
165
- self.meta["labels"] = [
166
- label.value.decode().strip() for label in list(self.meta["labels"])
167
- ]
168
- self.meta["model_title"] = self.meta["model_title"].value.decode()
169
-
170
- except Exception as e:
171
- self._handle_exception(e, when="read")
172
-
173
- def to_dataframe(
174
- self,
175
- result_type: Optional[str] = "all",
176
- variable: Optional[str] = "all",
177
- include_time: Optional[bool] = False,
178
- multilevel_header: Optional[bool] = True,
179
- ) -> pd.DataFrame:
180
- """Loads zzn results to pandas dataframe object.
181
-
182
- Args:
183
- result_type (str, optional): {'all'} | 'max' | 'min'
184
- Define whether to return all timesteps or just max/min results. Defaults to 'all'.
185
- variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
186
- Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
187
- include_time (bool, optional):
188
- Whether to include the time of max or min results. Defaults to False.
189
- multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
190
- headers with the variable as first level and node label as second header. If False, the column
191
- names will be formatted "{node label}_{variable}". Defaults to True.
192
-
193
- Returns:
194
- pandas.DataFrame(): dataframe object of simulation results
195
- """
196
- nx = self.meta["nnodes"]
197
- ny = self.meta["nvars"]
198
- nz = self.meta["savint_range"] + 1
199
- result_type = result_type.lower()
200
-
201
- if result_type == "all":
202
- arr = np.array(self.data["all_results"])
203
- time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
204
- vars = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
205
- if multilevel_header:
206
- col_names = [vars, self.meta["labels"]]
207
- df = pd.DataFrame(
208
- arr.reshape(nz, nx * ny),
209
- index=time_index,
210
- columns=pd.MultiIndex.from_product(col_names),
211
- )
212
- df.index.name = "Time (hr)"
213
- if not variable == "all":
214
- return df[variable.capitalize()]
215
-
216
- else:
217
- col_names = [f"{node}_{var}" for var in vars for node in self.meta["labels"]]
218
- df = pd.DataFrame(arr.reshape(nz, nx * ny), index=time_index, columns=col_names)
219
- df.index.name = "Time (hr)"
220
- if not variable == "all":
221
- use_cols = [col for col in df.columns if col.endswith(variable.capitalize())]
222
- return df[use_cols]
223
- return df
224
-
225
- elif (result_type == "max") or (result_type == "min"):
226
- arr = np.array(self.data[f"{result_type}_results"]).transpose()
227
- node_index = self.meta["labels"]
228
- col_names = [
229
- result_type.capitalize() + lbl
230
- for lbl in [
231
- " Flow",
232
- " Stage",
233
- " Froude",
234
- " Velocity",
235
- " Mode",
236
- " State",
237
- ]
238
- ]
239
- df = pd.DataFrame(arr, index=node_index, columns=col_names)
240
- df.index.name = "Node Label"
241
-
242
- if include_time:
243
- times = np.array(self.data[f"{result_type}_times"]).transpose()
244
- # transform timestep into hrs
245
- times = ((times - self.meta["timestep0"]) * self.meta["dt"]) / 3600
246
- time_col_names = [name + " Time(hrs)" for name in col_names]
247
- time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
248
- time_df.index.name = "Node Label"
249
- df = pd.concat([df, time_df], axis=1)
250
- new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
251
- df = df[new_col_order]
252
- if not variable == "all":
253
- return df[
254
- [
255
- f"{result_type.capitalize()} {variable.capitalize()}",
256
- f"{result_type.capitalize()} {variable.capitalize()} Time(hrs)",
257
- ]
258
- ]
259
- return df
260
-
261
- if not variable == "all":
262
- return df[f"{result_type.capitalize()} {variable.capitalize()}"]
263
- return df
264
-
265
- else:
266
- raise ValueError(f'Result type: "{result_type}" not recognised')
267
-
268
- def export_to_csv(
269
- self,
270
- save_location: Optional[Union[str, Path]] = "default",
271
- result_type: Optional[str] = "all",
272
- variable: Optional[str] = "all",
273
- include_time: Optional[bool] = False,
274
- ) -> None:
275
- """Exports zzn results to CSV file.
276
-
277
- Args:
278
- save_location (str, optional): {default} | folder or file path
279
- Full or relative path to folder or csv file to save output csv, if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file. Defaults to 'default'.
280
- result_type (str, optional): {all} | max | min
281
- Define whether to output all timesteps or just max/min results. Defaults to 'all'.
282
- variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
283
- Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
284
- include_time (bool, optional):
285
- Whether to include the time of max or min results. Defaults to False.
286
-
287
- Raises:
288
- Exception: Raised if result_type set to invalid option
289
- """
290
- if save_location == "default":
291
- save_location = Path(self.meta["zzn_name"]).with_suffix(".csv")
292
- else:
293
- save_location = Path(save_location)
294
- if not save_location.is_absolute():
295
- # for if relative folder path given
296
- save_location = Path(Path(self.meta["zzn_name"]).parent, save_location)
297
-
298
- if not save_location.suffix == ".csv": # Assumed to be pointing to a folder
299
- # Check if the folder exists, if not create it
300
- if not save_location.exists():
301
- Path.mkdir(save_location)
302
- save_location = Path(
303
- save_location, Path(self.meta["zzn_name"]).with_suffix(".csv").name
304
- )
305
-
306
- else:
307
- if not save_location.parent.exists():
308
- Path.mkdir(save_location.parent)
309
-
310
- result_type = result_type.lower()
311
-
312
- if not result_type.lower() in ["all", "max", "min"]:
313
- raise Exception(
314
- f" '{result_type}' is not a valid result type. Valid arguments are: 'all', 'max' or 'min' "
315
- )
316
-
317
- df = self.to_dataframe(
318
- result_type=result_type, variable=variable, include_time=include_time
319
- )
320
- df.to_csv(save_location)
321
- print(f"CSV saved to {save_location}")
322
-
323
- def to_dict_of_dataframes(self, variable: Optional[str] = "all") -> dict:
324
- """Loads zzn results to a dictionary of pandas dataframe objects.
325
-
326
- Args:
327
- variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
328
- Specify a single output variable (e.g 'flow' or 'stage') or any combination passed as comma separated
329
- variable names. Defaults to 'all'.
330
-
331
- Returns:
332
- dict: dictionary of dataframe object of simulation results, keys corresponding to variables.
333
- """
334
- nx = self.meta["nnodes"]
335
- ny = self.meta["nvars"]
336
- nz = self.meta["savint_range"] + 1
337
- output = {}
338
-
339
- arr = np.array(self.data["all_results"])
340
- time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
341
-
342
- vars = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
343
-
344
- col_names = self.meta["labels"]
345
- temp_arr = np.reshape(arr, (nz, ny, nx))
346
-
347
- for i, var in enumerate(vars):
348
- output[var] = pd.DataFrame(temp_arr[:, i, :], index=time_index, columns=col_names)
349
- output[var].index.name = "Time (hr)"
350
-
351
- output["Time (hr)"] = time_index
352
-
353
- if variable != "all":
354
- input_vars = variable.split(",")
355
- for i, var in enumerate(input_vars):
356
- input_vars[i] = var.strip().capitalize()
357
- if not input_vars[i] in vars:
358
- raise Exception(
359
- f" '{input_vars[i]}' is not a valid variable name. Valid arguments are: {vars} "
360
- )
361
-
362
- for var in vars:
363
- if var not in input_vars:
364
- del output[var]
365
- return output
1
+ """
2
+ Flood Modeller Python API
3
+ Copyright (C) 2024 Jacobs U.K. Limited
4
+
5
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
+
8
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
+
11
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
+
13
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import ctypes as ct
20
+ from pathlib import Path
21
+ from typing import Any
22
+
23
+ import numpy as np
24
+ import pandas as pd
25
+
26
+ from ._base import FMFile
27
+ from .util import is_windows
28
+
29
+
30
+ class ZZN(FMFile):
31
+ """Reads and processes Flood Modeller 1D binary results format '.zzn'
32
+
33
+ Args:
34
+ zzn_filepath (str): Full filepath to model zzn file
35
+
36
+ Output:
37
+ Initiates 'ZZN' class object
38
+ """
39
+
40
+ _filetype: str = "ZZN"
41
+ _suffix: str = ".zzn"
42
+
43
+ def __init__( # noqa: PLR0915
44
+ self,
45
+ zzn_filepath: str | Path | None = None,
46
+ from_json: bool = False,
47
+ ):
48
+ try:
49
+ if from_json:
50
+ return
51
+ FMFile.__init__(self, zzn_filepath)
52
+
53
+ # Get zzn_dll path
54
+ lib = "zzn_read.dll" if is_windows() else "libzzn_read.so"
55
+ zzn_dll = Path(__file__).resolve().parent / "libs" / lib
56
+
57
+ # Catch LD_LIBRARY_PATH error for linux
58
+ try:
59
+ zzn_read = ct.CDLL(str(zzn_dll))
60
+ except OSError as e:
61
+ msg_1 = "libifport.so.5: cannot open shared object file: No such file or directory"
62
+ if msg_1 in str(e):
63
+ msg_2 = "Set LD_LIBRARY_PATH environment variable to be floodmodeller_api/lib"
64
+ raise OSError(msg_2) from e
65
+ raise
66
+
67
+ # Get zzl path
68
+ zzn = self._filepath
69
+ zzl = zzn.with_suffix(".zzl")
70
+ if not zzl.exists():
71
+ raise FileNotFoundError(
72
+ "Error: Could not find associated .ZZL file. Ensure that the zzn results have an associated zzl file with matching name.",
73
+ )
74
+
75
+ self.meta: dict[str, Any] = {} # Dict object to hold all metadata
76
+ self.data = {} # Dict object to hold all data
77
+
78
+ # PROCESS_ZZL
79
+ self.meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)
80
+ self.meta["zzn_name"] = ct.create_string_buffer(bytes(str(zzn), "utf-8"), 255)
81
+ self.meta["model_title"] = ct.create_string_buffer(b"", 128)
82
+ self.meta["nnodes"] = ct.c_int(0)
83
+ self.meta["label_length"] = ct.c_int(0)
84
+ self.meta["dt"] = ct.c_float(0.0)
85
+ self.meta["timestep0"] = ct.c_int(0)
86
+ self.meta["ltimestep"] = ct.c_int(0)
87
+ self.meta["save_int"] = ct.c_float(0.0)
88
+ self.meta["is_quality"] = ct.c_bool(False)
89
+ self.meta["nvars"] = ct.c_int(0)
90
+ self.meta["tzero"] = (ct.c_int * 5)()
91
+ self.meta["errstat"] = ct.c_int(0)
92
+ zzn_read.process_zzl(
93
+ ct.byref(self.meta["zzl_name"]),
94
+ ct.byref(self.meta["model_title"]),
95
+ ct.byref(self.meta["nnodes"]),
96
+ ct.byref(self.meta["label_length"]),
97
+ ct.byref(self.meta["dt"]),
98
+ ct.byref(self.meta["timestep0"]),
99
+ ct.byref(self.meta["ltimestep"]),
100
+ ct.byref(self.meta["save_int"]),
101
+ ct.byref(self.meta["is_quality"]),
102
+ ct.byref(self.meta["nvars"]),
103
+ ct.byref(self.meta["tzero"]),
104
+ ct.byref(self.meta["errstat"]),
105
+ )
106
+ # PROCESS_LABELS
107
+ self.meta["labels"] = (
108
+ ct.c_char * self.meta["label_length"].value * self.meta["nnodes"].value
109
+ )()
110
+ zzn_read.process_labels(
111
+ ct.byref(self.meta["zzl_name"]),
112
+ ct.byref(self.meta["nnodes"]),
113
+ ct.byref(self.meta["label_length"]),
114
+ ct.byref(self.meta["errstat"]),
115
+ )
116
+ for i in range(self.meta["nnodes"].value):
117
+ zzn_read.get_zz_label(
118
+ ct.byref(ct.c_int(i + 1)),
119
+ ct.byref(self.meta["labels"][i]),
120
+ ct.byref(self.meta["errstat"]),
121
+ )
122
+ # PREPROCESS_ZZN
123
+ last_hr = (
124
+ (self.meta["ltimestep"].value - self.meta["timestep0"].value)
125
+ * self.meta["dt"].value
126
+ / 3600
127
+ )
128
+ self.meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
129
+ self.meta["aitimestep"] = (ct.c_int * 2)(
130
+ self.meta["timestep0"].value,
131
+ self.meta["ltimestep"].value,
132
+ )
133
+ self.meta["isavint"] = (ct.c_int * 2)()
134
+ zzn_read.preprocess_zzn(
135
+ ct.byref(self.meta["output_hrs"]),
136
+ ct.byref(self.meta["aitimestep"]),
137
+ ct.byref(self.meta["dt"]),
138
+ ct.byref(self.meta["timestep0"]),
139
+ ct.byref(self.meta["ltimestep"]),
140
+ ct.byref(self.meta["save_int"]),
141
+ ct.byref(self.meta["isavint"]),
142
+ )
143
+ # PROCESS_ZZN
144
+ self.meta["node_ID"] = ct.c_int(-1)
145
+ self.meta["savint_skip"] = ct.c_int(1)
146
+ self.meta["savint_range"] = ct.c_int(
147
+ int(
148
+ (self.meta["isavint"][1] - self.meta["isavint"][0])
149
+ / self.meta["savint_skip"].value,
150
+ ),
151
+ )
152
+ nx = self.meta["nnodes"].value
153
+ ny = self.meta["nvars"].value
154
+ nz = self.meta["savint_range"].value + 1
155
+ self.data["all_results"] = (ct.c_float * nx * ny * nz)()
156
+ self.data["max_results"] = (ct.c_float * nx * ny)()
157
+ self.data["min_results"] = (ct.c_float * nx * ny)()
158
+ self.data["max_times"] = (ct.c_int * nx * ny)()
159
+ self.data["min_times"] = (ct.c_int * nx * ny)()
160
+ zzn_read.process_zzn(
161
+ ct.byref(self.meta["zzn_name"]),
162
+ ct.byref(self.meta["node_ID"]),
163
+ ct.byref(self.meta["nnodes"]),
164
+ ct.byref(self.meta["is_quality"]),
165
+ ct.byref(self.meta["nvars"]),
166
+ ct.byref(self.meta["savint_range"]),
167
+ ct.byref(self.meta["savint_skip"]),
168
+ ct.byref(self.data["all_results"]),
169
+ ct.byref(self.data["max_results"]),
170
+ ct.byref(self.data["min_results"]),
171
+ ct.byref(self.data["max_times"]),
172
+ ct.byref(self.data["min_times"]),
173
+ ct.byref(self.meta["errstat"]),
174
+ ct.byref(self.meta["isavint"]),
175
+ )
176
+
177
+ # Convert useful metadata from C types into python types
178
+
179
+ self.meta["dt"] = self.meta["dt"].value
180
+ self.meta["nnodes"] = self.meta["nnodes"].value
181
+ self.meta["save_int"] = self.meta["save_int"].value
182
+ self.meta["nvars"] = self.meta["nvars"].value
183
+ self.meta["savint_range"] = self.meta["savint_range"].value
184
+
185
+ self.meta["zzn_name"] = self.meta["zzn_name"].value.decode()
186
+ self.meta["labels"] = [
187
+ label.value.decode().strip() for label in list(self.meta["labels"])
188
+ ]
189
+ self.meta["model_title"] = self.meta["model_title"].value.decode()
190
+
191
+ except Exception as e:
192
+ self._handle_exception(e, when="read")
193
+
194
+ def to_dataframe( # noqa: PLR0911
195
+ self,
196
+ result_type: str = "all",
197
+ variable: str = "all",
198
+ include_time: bool = False,
199
+ multilevel_header: bool = True,
200
+ ) -> pd.Series | pd.DataFrame:
201
+ """Loads zzn results to pandas dataframe object.
202
+
203
+ Args:
204
+ result_type (str, optional): {'all'} | 'max' | 'min'
205
+ Define whether to return all timesteps or just max/min results. Defaults to 'all'.
206
+ variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
207
+ Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
208
+ include_time (bool, optional):
209
+ Whether to include the time of max or min results. Defaults to False.
210
+ multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
211
+ headers with the variable as first level and node label as second header. If False, the column
212
+ names will be formatted "{node label}_{variable}". Defaults to True.
213
+
214
+ Returns:
215
+ pandas.DataFrame(): dataframe object of simulation results
216
+ """
217
+ nx = self.meta["nnodes"]
218
+ ny = self.meta["nvars"]
219
+ nz = self.meta["savint_range"] + 1
220
+ result_type = result_type.lower()
221
+
222
+ if result_type == "all":
223
+ arr = np.array(self.data["all_results"])
224
+ time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
225
+ vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
226
+ if multilevel_header:
227
+ col_names = [vars_list, self.meta["labels"]]
228
+ df = pd.DataFrame(
229
+ arr.reshape(nz, nx * ny),
230
+ index=time_index,
231
+ columns=pd.MultiIndex.from_product(col_names),
232
+ )
233
+ df.index.name = "Time (hr)"
234
+ if variable != "all":
235
+ return df[variable.capitalize()]
236
+
237
+ else:
238
+ col_names = [f"{node}_{var}" for var in vars_list for node in self.meta["labels"]]
239
+ df = pd.DataFrame(arr.reshape(nz, nx * ny), index=time_index, columns=col_names)
240
+ df.index.name = "Time (hr)"
241
+ if variable != "all":
242
+ use_cols = [col for col in df.columns if col.endswith(variable.capitalize())]
243
+ return df[use_cols]
244
+ return df
245
+
246
+ if result_type in ("max", "min"):
247
+ arr = np.array(self.data[f"{result_type}_results"]).transpose()
248
+ node_index = self.meta["labels"]
249
+ col_names = [
250
+ result_type.capitalize() + lbl
251
+ for lbl in [
252
+ " Flow",
253
+ " Stage",
254
+ " Froude",
255
+ " Velocity",
256
+ " Mode",
257
+ " State",
258
+ ]
259
+ ]
260
+ df = pd.DataFrame(arr, index=node_index, columns=col_names)
261
+ df.index.name = "Node Label"
262
+
263
+ if include_time:
264
+ times = np.array(self.data[f"{result_type}_times"]).transpose()
265
+ # transform timestep into hrs
266
+ times = ((times - self.meta["timestep0"]) * self.meta["dt"]) / 3600
267
+ time_col_names = [name + " Time(hrs)" for name in col_names]
268
+ time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
269
+ time_df.index.name = "Node Label"
270
+ df = pd.concat([df, time_df], axis=1)
271
+ new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
272
+ df = df[new_col_order]
273
+ if variable != "all":
274
+ return df[
275
+ [
276
+ f"{result_type.capitalize()} {variable.capitalize()}",
277
+ f"{result_type.capitalize()} {variable.capitalize()} Time(hrs)",
278
+ ]
279
+ ]
280
+ return df
281
+
282
+ if variable != "all":
283
+ return df[f"{result_type.capitalize()} {variable.capitalize()}"]
284
+ return df
285
+
286
+ raise ValueError(f'Result type: "{result_type}" not recognised')
287
+
288
+ def export_to_csv(
289
+ self,
290
+ save_location: str | Path = "default",
291
+ result_type: str = "all",
292
+ variable: str = "all",
293
+ include_time: bool = False,
294
+ ) -> None:
295
+ """Exports zzn results to CSV file.
296
+
297
+ Args:
298
+ save_location (str, optional): {default} | folder or file path
299
+ Full or relative path to folder or csv file to save output csv, if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file. Defaults to 'default'.
300
+ result_type (str, optional): {all} | max | min
301
+ Define whether to output all timesteps or just max/min results. Defaults to 'all'.
302
+ variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
303
+ Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
304
+ include_time (bool, optional):
305
+ Whether to include the time of max or min results. Defaults to False.
306
+
307
+ Raises:
308
+ Exception: Raised if result_type set to invalid option
309
+ """
310
+ if save_location == "default":
311
+ save_location = Path(self.meta["zzn_name"]).with_suffix(".csv")
312
+ else:
313
+ save_location = Path(save_location)
314
+ if not save_location.is_absolute():
315
+ # for if relative folder path given
316
+ save_location = Path(Path(self.meta["zzn_name"]).parent, save_location)
317
+
318
+ if save_location.suffix != ".csv": # Assumed to be pointing to a folder
319
+ # Check if the folder exists, if not create it
320
+ if not save_location.exists():
321
+ Path.mkdir(save_location)
322
+ save_location = Path(
323
+ save_location,
324
+ Path(self.meta["zzn_name"]).with_suffix(".csv").name,
325
+ )
326
+
327
+ elif not save_location.parent.exists():
328
+ Path.mkdir(save_location.parent)
329
+
330
+ result_type = result_type.lower()
331
+
332
+ if result_type.lower() not in ["all", "max", "min"]:
333
+ raise Exception(
334
+ f" '{result_type}' is not a valid result type. Valid arguments are: 'all', 'max' or 'min' ",
335
+ )
336
+
337
+ df = self.to_dataframe(
338
+ result_type=result_type,
339
+ variable=variable,
340
+ include_time=include_time,
341
+ )
342
+ df.to_csv(save_location)
343
+ print(f"CSV saved to {save_location}")
344
+
345
def to_dict_of_dataframes(self, variable: str = "all") -> dict:
    """Loads zzn results to a dictionary of pandas dataframe objects.

    Args:
        variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
            Specify a single output variable (e.g 'flow' or 'stage') or any combination passed as comma separated
            variable names. Defaults to 'all'.

    Returns:
        dict: dictionary of dataframe object of simulation results, keys corresponding to variables.
            Always includes a "Time (hr)" entry holding the time index array.

    Raises:
        ValueError: If any requested variable name is not recognised.
    """
    num_nodes = self.meta["nnodes"]
    num_vars = self.meta["nvars"]
    num_steps = self.meta["savint_range"] + 1
    vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]

    # Validate the requested variables up front, before doing any array work.
    if variable == "all":
        wanted = vars_list
    else:
        wanted = [name.strip().capitalize() for name in variable.split(",")]
        for name in wanted:
            if name not in vars_list:
                raise ValueError(
                    f" '{name}' is not a valid variable name. Valid arguments are: {vars_list} ",
                )

    arr = np.array(self.data["all_results"])
    time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], num_steps)
    col_names = self.meta["labels"]
    # Raw results are laid out (time, variable, node) in memory.
    temp_arr = np.reshape(arr, (num_steps, num_vars, num_nodes))

    output: dict = {}
    for i, var in enumerate(vars_list):
        # Only build dataframes for the variables actually requested.
        if var not in wanted:
            continue
        output[var] = pd.DataFrame(temp_arr[:, i, :], index=time_index, columns=col_names)
        output[var].index.name = "Time (hr)"

    output["Time (hr)"] = time_index

    return output