floodmodeller-api 0.4.2.post1__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (178)
  1. floodmodeller_api/__init__.py +8 -9
  2. floodmodeller_api/_base.py +169 -176
  3. floodmodeller_api/backup.py +273 -273
  4. floodmodeller_api/dat.py +889 -831
  5. floodmodeller_api/diff.py +136 -119
  6. floodmodeller_api/ied.py +302 -306
  7. floodmodeller_api/ief.py +553 -637
  8. floodmodeller_api/ief_flags.py +253 -253
  9. floodmodeller_api/inp.py +260 -266
  10. floodmodeller_api/libs/libifcoremd.dll +0 -0
  11. floodmodeller_api/libs/libifcoremt.so.5 +0 -0
  12. floodmodeller_api/libs/libifport.so.5 +0 -0
  13. floodmodeller_api/{libmmd.dll → libs/libimf.so} +0 -0
  14. floodmodeller_api/libs/libintlc.so.5 +0 -0
  15. floodmodeller_api/libs/libmmd.dll +0 -0
  16. floodmodeller_api/libs/libsvml.so +0 -0
  17. floodmodeller_api/libs/libzzn_read.so +0 -0
  18. floodmodeller_api/libs/zzn_read.dll +0 -0
  19. floodmodeller_api/logs/__init__.py +2 -2
  20. floodmodeller_api/logs/lf.py +364 -312
  21. floodmodeller_api/logs/lf_helpers.py +354 -352
  22. floodmodeller_api/logs/lf_params.py +643 -529
  23. floodmodeller_api/mapping.py +84 -0
  24. floodmodeller_api/test/__init__.py +4 -4
  25. floodmodeller_api/test/conftest.py +16 -8
  26. floodmodeller_api/test/test_backup.py +117 -117
  27. floodmodeller_api/test/test_conveyance.py +107 -0
  28. floodmodeller_api/test/test_dat.py +222 -92
  29. floodmodeller_api/test/test_data/All Units 4_6.DAT +1081 -1081
  30. floodmodeller_api/test/test_data/All Units 4_6.feb +1081 -1081
  31. floodmodeller_api/test/test_data/BRIDGE.DAT +926 -926
  32. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.dat +36 -36
  33. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.feb +36 -36
  34. floodmodeller_api/test/test_data/DamBreakADI.xml +52 -52
  35. floodmodeller_api/test/test_data/DamBreakFAST.xml +58 -58
  36. floodmodeller_api/test/test_data/DamBreakFAST_dy.xml +53 -53
  37. floodmodeller_api/test/test_data/DamBreakTVD.xml +55 -55
  38. floodmodeller_api/test/test_data/DefenceBreach.xml +53 -53
  39. floodmodeller_api/test/test_data/DefenceBreachFAST.xml +60 -60
  40. floodmodeller_api/test/test_data/DefenceBreachFAST_dy.xml +55 -55
  41. floodmodeller_api/test/test_data/Domain1+2_QH.xml +76 -76
  42. floodmodeller_api/test/test_data/Domain1_H.xml +41 -41
  43. floodmodeller_api/test/test_data/Domain1_Q.xml +41 -41
  44. floodmodeller_api/test/test_data/Domain1_Q_FAST.xml +48 -48
  45. floodmodeller_api/test/test_data/Domain1_Q_FAST_dy.xml +48 -48
  46. floodmodeller_api/test/test_data/Domain1_Q_xml_expected.json +263 -0
  47. floodmodeller_api/test/test_data/Domain1_W.xml +41 -41
  48. floodmodeller_api/test/test_data/EX1.DAT +321 -321
  49. floodmodeller_api/test/test_data/EX1.ext +107 -107
  50. floodmodeller_api/test/test_data/EX1.feb +320 -320
  51. floodmodeller_api/test/test_data/EX1.gxy +107 -107
  52. floodmodeller_api/test/test_data/EX17.DAT +421 -422
  53. floodmodeller_api/test/test_data/EX17.ext +213 -213
  54. floodmodeller_api/test/test_data/EX17.feb +422 -422
  55. floodmodeller_api/test/test_data/EX18.DAT +375 -375
  56. floodmodeller_api/test/test_data/EX18_DAT_expected.json +3876 -0
  57. floodmodeller_api/test/test_data/EX2.DAT +302 -302
  58. floodmodeller_api/test/test_data/EX3.DAT +926 -926
  59. floodmodeller_api/test/test_data/EX3_DAT_expected.json +16235 -0
  60. floodmodeller_api/test/test_data/EX3_IEF_expected.json +61 -0
  61. floodmodeller_api/test/test_data/EX6.DAT +2084 -2084
  62. floodmodeller_api/test/test_data/EX6.ext +532 -532
  63. floodmodeller_api/test/test_data/EX6.feb +2084 -2084
  64. floodmodeller_api/test/test_data/EX6_DAT_expected.json +31647 -0
  65. floodmodeller_api/test/test_data/Event Data Example.DAT +336 -336
  66. floodmodeller_api/test/test_data/Event Data Example.ext +107 -107
  67. floodmodeller_api/test/test_data/Event Data Example.feb +336 -336
  68. floodmodeller_api/test/test_data/Linked1D2D.xml +52 -52
  69. floodmodeller_api/test/test_data/Linked1D2DFAST.xml +53 -53
  70. floodmodeller_api/test/test_data/Linked1D2DFAST_dy.xml +48 -48
  71. floodmodeller_api/test/test_data/Linked1D2D_xml_expected.json +313 -0
  72. floodmodeller_api/test/test_data/blockage.dat +50 -50
  73. floodmodeller_api/test/test_data/blockage.ext +45 -45
  74. floodmodeller_api/test/test_data/blockage.feb +9 -9
  75. floodmodeller_api/test/test_data/blockage.gxy +71 -71
  76. floodmodeller_api/test/test_data/conveyance_test.dat +165 -0
  77. floodmodeller_api/test/test_data/conveyance_test.feb +116 -0
  78. floodmodeller_api/test/test_data/conveyance_test.gxy +85 -0
  79. floodmodeller_api/test/test_data/defaultUnits.dat +127 -127
  80. floodmodeller_api/test/test_data/defaultUnits.ext +45 -45
  81. floodmodeller_api/test/test_data/defaultUnits.feb +9 -9
  82. floodmodeller_api/test/test_data/defaultUnits.fmpx +58 -58
  83. floodmodeller_api/test/test_data/defaultUnits.gxy +85 -85
  84. floodmodeller_api/test/test_data/ex3.ief +20 -20
  85. floodmodeller_api/test/test_data/ex3.lf1 +2800 -2800
  86. floodmodeller_api/test/test_data/ex4.DAT +1374 -1374
  87. floodmodeller_api/test/test_data/ex4_changed.DAT +1374 -1374
  88. floodmodeller_api/test/test_data/example1.inp +329 -329
  89. floodmodeller_api/test/test_data/example2.inp +158 -158
  90. floodmodeller_api/test/test_data/example3.inp +297 -297
  91. floodmodeller_api/test/test_data/example4.inp +388 -388
  92. floodmodeller_api/test/test_data/example5.inp +147 -147
  93. floodmodeller_api/test/test_data/example6.inp +154 -154
  94. floodmodeller_api/test/test_data/expected_conveyance.csv +60 -0
  95. floodmodeller_api/test/test_data/jump.dat +176 -176
  96. floodmodeller_api/test/test_data/network.dat +1374 -1374
  97. floodmodeller_api/test/test_data/network.ext +45 -45
  98. floodmodeller_api/test/test_data/network.exy +1 -1
  99. floodmodeller_api/test/test_data/network.feb +45 -45
  100. floodmodeller_api/test/test_data/network.ied +45 -45
  101. floodmodeller_api/test/test_data/network.ief +20 -20
  102. floodmodeller_api/test/test_data/network.inp +147 -147
  103. floodmodeller_api/test/test_data/network.pxy +57 -57
  104. floodmodeller_api/test/test_data/network.zzd +122 -122
  105. floodmodeller_api/test/test_data/network_dat_expected.json +21837 -0
  106. floodmodeller_api/test/test_data/network_from_tabularCSV.csv +87 -87
  107. floodmodeller_api/test/test_data/network_ied_expected.json +287 -0
  108. floodmodeller_api/test/test_data/rnweir.dat +9 -9
  109. floodmodeller_api/test/test_data/rnweir.ext +45 -45
  110. floodmodeller_api/test/test_data/rnweir.feb +9 -9
  111. floodmodeller_api/test/test_data/rnweir.gxy +45 -45
  112. floodmodeller_api/test/test_data/rnweir_default.dat +74 -74
  113. floodmodeller_api/test/test_data/rnweir_default.ext +45 -45
  114. floodmodeller_api/test/test_data/rnweir_default.feb +9 -9
  115. floodmodeller_api/test/test_data/rnweir_default.fmpx +58 -58
  116. floodmodeller_api/test/test_data/rnweir_default.gxy +53 -53
  117. floodmodeller_api/test/test_data/unit checks.dat +16 -16
  118. floodmodeller_api/test/test_ied.py +29 -29
  119. floodmodeller_api/test/test_ief.py +136 -24
  120. floodmodeller_api/test/test_inp.py +47 -48
  121. floodmodeller_api/test/test_json.py +114 -0
  122. floodmodeller_api/test/test_logs_lf.py +102 -51
  123. floodmodeller_api/test/test_tool.py +165 -152
  124. floodmodeller_api/test/test_toolbox_structure_log.py +234 -239
  125. floodmodeller_api/test/test_xml2d.py +151 -156
  126. floodmodeller_api/test/test_zzn.py +36 -34
  127. floodmodeller_api/to_from_json.py +230 -0
  128. floodmodeller_api/tool.py +332 -329
  129. floodmodeller_api/toolbox/__init__.py +5 -5
  130. floodmodeller_api/toolbox/example_tool.py +45 -45
  131. floodmodeller_api/toolbox/model_build/__init__.py +2 -2
  132. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +100 -98
  133. floodmodeller_api/toolbox/model_build/structure_log/__init__.py +1 -1
  134. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +287 -289
  135. floodmodeller_api/toolbox/model_build/structure_log_definition.py +76 -76
  136. floodmodeller_api/units/__init__.py +10 -10
  137. floodmodeller_api/units/_base.py +214 -212
  138. floodmodeller_api/units/boundaries.py +467 -467
  139. floodmodeller_api/units/comment.py +52 -55
  140. floodmodeller_api/units/conduits.py +382 -402
  141. floodmodeller_api/units/conveyance.py +301 -0
  142. floodmodeller_api/units/helpers.py +123 -131
  143. floodmodeller_api/units/iic.py +107 -101
  144. floodmodeller_api/units/losses.py +305 -306
  145. floodmodeller_api/units/sections.py +465 -446
  146. floodmodeller_api/units/structures.py +1690 -1683
  147. floodmodeller_api/units/units.py +93 -104
  148. floodmodeller_api/units/unsupported.py +44 -44
  149. floodmodeller_api/units/variables.py +87 -89
  150. floodmodeller_api/urban1d/__init__.py +11 -11
  151. floodmodeller_api/urban1d/_base.py +188 -179
  152. floodmodeller_api/urban1d/conduits.py +93 -85
  153. floodmodeller_api/urban1d/general_parameters.py +58 -58
  154. floodmodeller_api/urban1d/junctions.py +81 -79
  155. floodmodeller_api/urban1d/losses.py +81 -74
  156. floodmodeller_api/urban1d/outfalls.py +114 -110
  157. floodmodeller_api/urban1d/raingauges.py +111 -111
  158. floodmodeller_api/urban1d/subsections.py +92 -98
  159. floodmodeller_api/urban1d/xsections.py +147 -144
  160. floodmodeller_api/util.py +119 -21
  161. floodmodeller_api/validation/parameters.py +660 -660
  162. floodmodeller_api/validation/urban_parameters.py +388 -404
  163. floodmodeller_api/validation/validation.py +110 -108
  164. floodmodeller_api/version.py +1 -1
  165. floodmodeller_api/xml2d.py +632 -673
  166. floodmodeller_api/xml2d_template.py +37 -37
  167. floodmodeller_api/zzn.py +414 -363
  168. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/LICENSE.txt +13 -13
  169. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/METADATA +85 -82
  170. floodmodeller_api-0.4.4.dist-info/RECORD +185 -0
  171. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/WHEEL +1 -1
  172. floodmodeller_api/libifcoremd.dll +0 -0
  173. floodmodeller_api/test/test_data/EX3.bmp +0 -0
  174. floodmodeller_api/test/test_data/test_output.csv +0 -87
  175. floodmodeller_api/zzn_read.dll +0 -0
  176. floodmodeller_api-0.4.2.post1.dist-info/RECORD +0 -164
  177. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/entry_points.txt +0 -0
  178. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/top_level.txt +0 -0
@@ -1,312 +1,364 @@
- """
- Flood Modeller Python API
- Copyright (C) 2023 Jacobs U.K. Limited
-
- This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
-
- If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
- address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
- """
-
- from pathlib import Path
- from typing import Optional, Union
-
- import pandas as pd
-
- from .._base import FMFile
- from .lf_helpers import state_factory
- from .lf_params import lf1_steady_data_to_extract, lf1_unsteady_data_to_extract, lf2_data_to_extract
-
-
- class LF(FMFile):
-     """Reads and processes Flood Modeller log file
-
-     Args:
-         lf1_filepath (str): Full filepath to model log file
-         data_to_extract (dict): Dictionary defining each line type to parse
-         steady (bool): True if for a steady-state simulation
-
-     Output:
-         Initiates 'LF' class object
-     """
-
-     def __init__(
-         self,
-         lf_filepath: Optional[Union[str, Path]],
-         data_to_extract: dict,
-         steady: bool = False,
-     ):
-         try:
-             FMFile.__init__(self, lf_filepath)
-
-             self._data_to_extract = data_to_extract
-             self._init_counters()
-             self._init_parsers()
-             self._state = state_factory(steady, self._extracted_data)
-
-             self._read()
-
-         except Exception as e:
-             self._handle_exception(e, when="read")
-
-     def _read(self, force_reread: bool = False, suppress_final_step: bool = False):
-         # Read LF file
-         with open(self._filepath, "r") as lf_file:
-             self._raw_data = [line.rstrip("\n") for line in lf_file.readlines()]
-
-         # Force rereading from start of file
-         if force_reread is True:
-             self._del_attributes()
-             self._init_counters()
-             self._init_parsers()
-
-         # Process file
-         self._update_data()
-
-         if not suppress_final_step:
-             self._set_attributes()
-
-     def read(self, force_reread: bool = False, suppress_final_step: bool = False) -> None:
-         """Reads log file
-
-         Args:
-             force_reread (bool): If False, starts reading from where it stopped last time. If True, starts reading from the start of the file.
-             suppress_final_step (bool): If False, dataframes and dictionary are not created as attributes.
-
-         """
-
-         self._read(force_reread, suppress_final_step)
-
-     def _init_counters(self):
-         """Initialises counters that keep track of file during simulation"""
-
-         self._no_lines = 0 # number of lines that have been read so far
-         self._no_iters = 0 # number of iterations so far
-
-     def _init_parsers(self):
-         """Creates dictionary of Parser objects for each entry in data_to_extract"""
-
-         self._extracted_data = {}
-
-         for key in self._data_to_extract:
-             subdictionary = self._data_to_extract[key]
-             subdictionary_class = subdictionary["class"]
-             subdictionary_kwargs = {k: v for k, v in subdictionary.items() if k != "class"}
-             subdictionary_kwargs["name"] = key
-             self._extracted_data[key] = subdictionary_class(**subdictionary_kwargs)
-
-     def _update_data(self):
-         """Updates value of each Parser object based on raw data"""
-
-         # self._print_no_lines()
-
-         # loop through lines that haven't already been read
-         raw_lines = self._raw_data[self._no_lines :]
-         for raw_line in raw_lines:
-             # loop through parser types
-             for key in self._data_to_extract:
-                 parser = self._extracted_data[key]
-
-                 # lines which start with prefix
-                 if raw_line.startswith(parser.prefix):
-                     # store everything after prefix
-                     end_of_line = raw_line.split(parser.prefix)[1].lstrip()
-                     parser.process_line(end_of_line)
-
-                     # index marks the end of an iteration
-                     if parser.is_index is True:
-                         self._sync_cols()
-                         self._no_iters += 1
-
-             # update counter
-             self._no_lines += 1
-
-         # self._print_no_lines()
-
-     def _get_index(self):
-         """Finds key and dataframe for variable that is the index"""
-
-         for key in self._data_to_extract:
-             try:
-                 self._data_to_extract[key]["is_index"]
-                 index_key = key
-                 index_df = self._extracted_data[key].data.get_value()
-                 return index_key, index_df
-
-             except KeyError:
-                 pass
-
-         raise Exception("No index variable found")
-
-     def _set_attributes(self):
-         """Makes each Parser value an attribute; "last" values in dictionary"""
-
-         index_key, index_df = self._get_index()
-
-         info = {}
-
-         for key in self._data_to_extract:
-             data_type = self._data_to_extract[key]["data_type"]
-             value = self._extracted_data[key].data.get_value(index_key, index_df)
-
-             if data_type == "all":
-                 setattr(self, key, value)
-             elif data_type == "last" and value is not None:
-                 info[key] = value
-
-         self.info = info
-
-     def _del_attributes(self):
-         """Deletes each Parser value direct attribute of LF"""
-
-         for key in self._data_to_extract:
-             data_type = self._data_to_extract[key]["data_type"]
-             if data_type == "all":
-                 delattr(self, key)
-
-         delattr(self, "info")
-
-     def to_dataframe(self) -> pd.DataFrame:
-         """Collects parameter values that change throughout simulation into a dataframe
-
-         Returns:
-             pd.DataFrame: DataFrame of log file parameters indexed by simulation time (unsteady) or network iterations (steady)
-         """
-
-         # TODO: make more like ZZN.to_dataframe
-
-         data_type_all = {
-             k: getattr(self, k) for k, v in self._data_to_extract.items() if v["data_type"] == "all"
-         }
-
-         df = pd.concat(data_type_all, axis=1)
-         df.columns = df.columns.droplevel()
-
-         df.sort_index(inplace=True)
-
-         return df
-
-     def _sync_cols(self):
-         """Ensures Parser values (of type "all") have an entry each iteration"""
-
-         # loop through parser types
-         for key in self._data_to_extract:
-             parser = self._extracted_data[key]
-
-             # sync parser types that are not the index
-             if parser.is_index is False:
-                 # if their number of values is not in sync
-                 if parser.data_type == "all" and parser.data.no_values < (
-                     self._no_iters + int(parser.before_index)
-                 ):
-                     # append nan to the list
-                     parser.data.update(parser._nan)
-
-     def _print_no_lines(self):
-         """Prints number of lines that have been read so far"""
-
-         print("Last line read: " + str(self._no_lines))
-
-     def report_progress(self) -> float:
-         """Returns progress for unsteady simulations
-
-         Returns:
-             float: Last progress percentage recorded in log file
-         """
-
-         return self._state.report_progress()
-
-
- class LF1(LF):
-     """Reads and processes Flood Modeller 1D log file '.lf1'
-
-     Args:
-         lf1_filepath (str): Full filepath to model lf1 file
-         steady (bool): True for steady-state simulations
-
-     **Attributes (unsteady)**
-
-     Args:
-         info (dict): Parameters with one value per simulation
-         mass_error (pandas.DataFrame): Mass error
-         timestep (pandas.DataFrame): Timestep
-         elapsed (pandas.DataFrame): Elapsed
-         simulated (pandas.DataFrame): Simulated
-         iterations (pandas.DataFrame): PlotI1
-         convergence (pandas.DataFrame): PlotC1
-         flow (pandas.DataFrame): PlotF1
-
-     **Attributes (steady)**
-
-     Args:
-         info (dict): Parameters with one value per simulation
-         network_iteration (pandas.DataFrame): Network iteration
-         largest_change_in_split_from_last_iteration (pandas.DataFrame): Largest change in split from last iteration
-
-     Output:
-         Initiates 'LF1' class object
-     """
-
-     _filetype: str = "LF1"
-     _suffix: str = ".lf1"
-
-     def __init__(self, lf_filepath: Optional[Union[str, Path]], steady: bool = False):
-         if steady is False:
-             data_to_extract = lf1_unsteady_data_to_extract
-         else:
-             data_to_extract = lf1_steady_data_to_extract
-
-         super().__init__(lf_filepath, data_to_extract, steady)
-
-
- class LF2(LF):
-     """Reads and processes Flood Modeller 1D log file '.lf2'
-
-     Args:
-         lf2_filepath (str): Full filepath to model lf2 file
-
-     **Attributes**
-
-     Args:
-         info (dict): Parameters with one value per simulation
-         simulated (pandas.DataFrame): Simulated
-         wet_cells (pandas.DataFrame): Wet cells
-         2D_boundary_inflow (pandas.DataFrame): 2D boundary inflow
-         2D_boundary_outflow (pandas.DataFrame): 2D boundary outflow
-         1D_link_flow (pandas.DataFrame): 1D link flow
-         change_in_volume (pandas.DataFrame): Change in volume
-         volume (pandas.DataFrame): Volume
-         inst_mass_err (pandas.DataFrame): Inst mass error
-         mass_error (pandas.DataFrame): Mass error
-         largest_cr (pandas.DataFrame): Largest Cr
-         elapsed (pandas.DataFrame): Elapsed
-
-     Output:
-         Initiates 'LF2' class object
-     """
-
-     _filetype: str = "LF2"
-     _suffix: str = ".lf2"
-
-     def __init__(self, lf_filepath: Optional[Union[str, Path]]):
-         data_to_extract = {
-             **lf1_unsteady_data_to_extract,
-             **lf2_data_to_extract,
-         }
-
-         super().__init__(lf_filepath, data_to_extract, steady=False)
-
-
- def lf_factory(filepath: str, suffix: str, steady: bool) -> LF:
-     if suffix == "lf1":
-         return LF1(filepath, steady)
-     if suffix == "lf2":
-         return LF2(filepath)
-     flow_type = "steady" if steady else "unsteady"
-     raise ValueError(f"Unexpected log file type {suffix} for {flow_type} flow")
+ """
+ Flood Modeller Python API
+ Copyright (C) 2024 Jacobs U.K. Limited
+
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
+
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
+ """
+
+ from __future__ import annotations
+
+ import datetime as dt
+ import time
+ from typing import TYPE_CHECKING
+
+ import pandas as pd
+
+ from .._base import FMFile
+ from ..util import handle_exception
+ from .lf_helpers import state_factory
+ from .lf_params import lf1_steady_data_to_extract, lf1_unsteady_data_to_extract, lf2_data_to_extract
+
+ if TYPE_CHECKING:
+     from pathlib import Path
+
+
+ OLD_FILE = 5
+ LOG_TIMEOUT = 10
+
+
+ class LF(FMFile):
+     """Reads and processes Flood Modeller log file
+
+     Args:
+         lf1_filepath (str): Full filepath to model log file
+         data_to_extract (dict): Dictionary defining each line type to parse
+         steady (bool): True if for a steady-state simulation
+
+     Output:
+         Initiates 'LF' class object
+     """
+
+     @handle_exception(when="read")
+     def __init__(
+         self,
+         lf_filepath: str | Path | None,
+         data_to_extract: dict,
+         steady: bool = False,
+     ):
+         FMFile.__init__(self, lf_filepath)
+
+         self._data_to_extract = data_to_extract
+         self._init_counters()
+         self._init_parsers()
+         self._state = state_factory(steady, self._extracted_data)
+
+         self._read()
+
+     def _read(self, force_reread: bool = False, suppress_final_step: bool = False):
+         # Read LF file
+         with open(self._filepath) as lf_file:
+             self._raw_data = [line.rstrip("\n") for line in lf_file.readlines()]
+
+         # Force rereading from start of file
+         if force_reread is True:
+             self._del_attributes()
+             self._init_counters()
+             self._init_parsers()
+
+         # Process file
+         self._update_data()
+
+         if not suppress_final_step:
+             self._set_attributes()
+
+     def read(self, force_reread: bool = False, suppress_final_step: bool = False) -> None:
+         """Reads log file
+
+         Args:
+             force_reread (bool): If False, starts reading from where it stopped last time. If True, starts reading from the start of the file.
+             suppress_final_step (bool): If False, dataframes and dictionary are not created as attributes.
+
+         """
+
+         self._read(force_reread, suppress_final_step)
+
+     def _init_counters(self):
+         """Initialises counters that keep track of file during simulation"""
+
+         self._no_lines = 0 # number of lines that have been read so far
+         self._no_iters = 0 # number of iterations so far
+
+     def _init_parsers(self):
+         """Creates dictionary of Parser objects for each entry in data_to_extract"""
+
+         self._extracted_data = {}
+
+         for key in self._data_to_extract:
+             subdictionary = self._data_to_extract[key]
+             subdictionary_class = subdictionary["class"]
+             subdictionary_kwargs = {k: v for k, v in subdictionary.items() if k != "class"}
+             subdictionary_kwargs["name"] = key
+             self._extracted_data[key] = subdictionary_class(**subdictionary_kwargs)
+
+     def _update_data(self):
+         """Updates value of each Parser object based on raw data"""
+
+         # loop through lines that haven't already been read
+         raw_lines = self._raw_data[self._no_lines :]
+         for raw_line in raw_lines:
+             # loop through parser types
+             for key in self._data_to_extract:
+                 parser = self._extracted_data[key]
+
+                 # lines which start with prefix
+                 if raw_line.startswith(parser.prefix):
+                     # store everything after prefix
+                     end_of_line = raw_line.split(parser.prefix)[1].lstrip()
+                     parser.process_line(end_of_line)
+
+                     # index marks the end of an iteration
+                     if parser.is_index is True:
+                         self._sync_cols()
+                         self._no_iters += 1
+
+             # update counter
+             self._no_lines += 1
+
+     def _get_index(self):
+         """Finds key and dataframe for variable that is the index"""
+
+         for key in self._data_to_extract:
+             try:
+                 self._data_to_extract[key]["is_index"]
+                 index_key = key
+                 index_df = self._extracted_data[key].data.get_value()
+                 return index_key, index_df
+
+             except KeyError:
+                 pass
+
+         raise Exception("No index variable found")
+
+     def _set_attributes(self):
+         """Makes each Parser value an attribute; "last" values in dictionary"""
+
+         index_key, index_df = self._get_index()
+
+         info = {}
+
+         for key in self._data_to_extract:
+             data_type = self._data_to_extract[key]["data_type"]
+             value = self._extracted_data[key].data.get_value(index_key, index_df)
+
+             if data_type == "all":
+                 setattr(self, key, value)
+             elif data_type == "last" and value is not None:
+                 info[key] = value
+
+         self.info = info
+
+     def _del_attributes(self):
+         """Deletes each Parser value direct attribute of LF"""
+
+         for key in self._data_to_extract:
+             data_type = self._data_to_extract[key]["data_type"]
+             if data_type == "all":
+                 delattr(self, key)
+
+         delattr(self, "info")
+
+     def to_dataframe(self, *, include_tuflow: bool = False) -> pd.DataFrame:
+         """Collects parameter values that change throughout simulation into a dataframe
+
+         Args:
+             include_tuflow (bool): Include diagnostics for linked TUFLOW models
+
+         Returns:
+             pd.DataFrame: DataFrame of log file parameters indexed by simulation time (unsteady) or network iterations (steady)
+         """
+
+         # TODO: make more like ZZN.to_dataframe
+
+         data_type_all = {
+             k: getattr(self, k)
+             for k, v in self._data_to_extract.items()
+             if v["data_type"] == "all" and (include_tuflow or "tuflow" not in k)
+         }
+
+         df = pd.concat(data_type_all, axis=1)
+         df.columns = df.columns.droplevel()
+
+         df.sort_index(inplace=True)
+
+         return df
+
+     def _sync_cols(self):
+         """Ensures Parser values (of type "all") have an entry each iteration"""
+
+         # loop through parser types
+         for key in self._data_to_extract:
+             parser = self._extracted_data[key]
+
+             # sync parser types that are not the index
+             if (
+                 parser.is_index is False # if their number of values is not in sync
+                 and parser.data_type == "all"
+                 and parser.data.no_values < (self._no_iters + int(parser.before_index))
+             ):
+                 # append nan to the list
+                 parser.data.update(parser._nan)
+
+     def _print_no_lines(self):
+         """Prints number of lines that have been read so far"""
+
+         print("Last line read: " + str(self._no_lines))
+
+     def report_progress(self) -> float:
+         """Returns progress for unsteady simulations
+
+         Returns:
+             float: Last progress percentage recorded in log file
+         """
+
+         return self._state.report_progress()
+
+
+ class LF1(LF):
+     """Reads and processes Flood Modeller 1D log file '.lf1'
+
+     Args:
+         lf1_filepath (str): Full filepath to model lf1 file
+         steady (bool): True for steady-state simulations
+
+     **Attributes (unsteady)**
+
+     Args:
+         info (dict): Parameters with one value per simulation
+         mass_error (pandas.DataFrame): Mass error
+         timestep (pandas.DataFrame): Timestep
+         elapsed (pandas.DataFrame): Elapsed
+         tuflow_vol (pandas.DataFrame): TUFLOW HPC Vol
+         tuflow_n_wet (pandas.DataFrame): TUFLOW HPC nWet
+         tuflow_dt (pandas.DataFrame): TUFLOW HPC dt
+         simulated (pandas.DataFrame): Simulated
+         iterations (pandas.DataFrame): PlotI1
+         convergence (pandas.DataFrame): PlotC1
+         flow (pandas.DataFrame): PlotF1
+
+     **Attributes (steady)**
+
+     Args:
+         info (dict): Parameters with one value per simulation
+         network_iteration (pandas.DataFrame): Network iteration
+         largest_change_in_split_from_last_iteration (pandas.DataFrame): Largest change in split from last iteration
+
+     Output:
+         Initiates 'LF1' class object
+     """
+
+     _filetype: str = "LF1"
+     _suffix: str = ".lf1"
+
+     def __init__(self, lf_filepath: str | Path | None, steady: bool = False):
+         if steady is False:
+             data_to_extract = lf1_unsteady_data_to_extract
+         else:
+             data_to_extract = lf1_steady_data_to_extract
+
+         super().__init__(lf_filepath, data_to_extract, steady)
+
+
+ class LF2(LF):
+     """Reads and processes Flood Modeller 1D log file '.lf2'
+
+     Args:
+         lf2_filepath (str): Full filepath to model lf2 file
+
+     **Attributes**
+
+     Args:
+         info (dict): Parameters with one value per simulation
+         simulated (pandas.DataFrame): Simulated
+         wet_cells (pandas.DataFrame): Wet cells
+         2D_boundary_inflow (pandas.DataFrame): 2D boundary inflow
+         2D_boundary_outflow (pandas.DataFrame): 2D boundary outflow
+         1D_link_flow (pandas.DataFrame): 1D link flow
+         change_in_volume (pandas.DataFrame): Change in volume
+         volume (pandas.DataFrame): Volume
+         inst_mass_err (pandas.DataFrame): Inst mass error
+         mass_error (pandas.DataFrame): Mass error
+         largest_cr (pandas.DataFrame): Largest Cr
+         elapsed (pandas.DataFrame): Elapsed
+
+     Output:
+         Initiates 'LF2' class object
+     """
+
+     _filetype: str = "LF2"
+     _suffix: str = ".lf2"
+
+     def __init__(self, lf_filepath: str | Path | None):
+         data_to_extract = {
+             **lf1_unsteady_data_to_extract,
+             **lf2_data_to_extract,
+         }
+
+         super().__init__(lf_filepath, data_to_extract, steady=False)
+
+
+ def create_lf(filepath: Path, suffix: str) -> LF1 | LF2 | None:
+     """Checks for a new log file, waiting for its creation if necessary"""
+
+     def _no_log_file(reason: str) -> None:
+         print(f"No progress bar as {reason}. Simulation will continue as usual.")
+
+     # ensure progress bar is supported
+     if suffix not in {"lf1", "lf2"}:
+         _no_log_file("log file must have suffix lf1 or lf2")
+         return None
+
+     # wait for log file to exist
+     log_file_exists = False
+     max_time = time.time() + LOG_TIMEOUT
+
+     while not log_file_exists:
+         time.sleep(0.1)
+
+         log_file_exists = filepath.is_file()
+
+         # timeout
+         if (not log_file_exists) and (time.time() > max_time):
+             _no_log_file("log file is expected but not detected")
+             return None
+
+     # wait for new log file
+     old_log_file = True
+     max_time = time.time() + LOG_TIMEOUT
+
+     while old_log_file:
+         time.sleep(0.1)
+
+         # difference between now and when log file was last modified
+         last_modified_timestamp = filepath.stat().st_mtime
+         last_modified = dt.datetime.fromtimestamp(last_modified_timestamp)
+         time_diff_sec = (dt.datetime.now() - last_modified).total_seconds()
+
+         # it's old if it's over OLD_FILE seconds old (TODO: is this robust?)
+         old_log_file = time_diff_sec > OLD_FILE
+
+         # timeout
+         if old_log_file and (time.time() > max_time):
+             _no_log_file("log file is from previous run")
+             return None
+
+     # create LF instance
+     return LF1(filepath) if suffix == "lf1" else LF2(filepath)
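
As a usage sketch of the updated log-file API above (the file path and printed output are illustrative; the class and method signatures follow the 0.4.4 code shown in this diff, imported directly from the module it defines):

from floodmodeller_api.logs.lf import LF1

# Parse a 1D unsteady log file produced by a simulation run (path is illustrative)
lf1 = LF1("path/to/simulation.lf1")

# Parameters with a single value per run, gathered by the "last" parsers
print(lf1.info)

# Last progress percentage written to the log file
print(lf1.report_progress())

# Per-iteration diagnostics as a DataFrame; 0.4.4 adds the keyword-only
# include_tuflow flag, which keeps the TUFLOW HPC columns when set to True
results = lf1.to_dataframe(include_tuflow=False)
print(results.head())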
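
Similarly, a minimal sketch of the new create_lf helper, which replaces lf_factory in this release: it polls for a freshly written log file and returns None rather than raising when no usable log is found (the path below is illustrative):

from pathlib import Path

from floodmodeller_api.logs.lf import create_lf

# Waits up to LOG_TIMEOUT seconds for a new .lf1 file to appear and be recent,
# then wraps it in an LF1 instance
log = create_lf(Path("path/to/simulation.lf1"), suffix="lf1")

if log is None:
    # Unsupported suffix, missing file, or a stale log from a previous run
    print("No log file available; continuing without progress reporting")
else:
    print(log.report_progress())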