floodmodeller-api 0.4.2.post1__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178)
  1. floodmodeller_api/__init__.py +8 -9
  2. floodmodeller_api/_base.py +169 -176
  3. floodmodeller_api/backup.py +273 -273
  4. floodmodeller_api/dat.py +889 -831
  5. floodmodeller_api/diff.py +136 -119
  6. floodmodeller_api/ied.py +302 -306
  7. floodmodeller_api/ief.py +553 -637
  8. floodmodeller_api/ief_flags.py +253 -253
  9. floodmodeller_api/inp.py +260 -266
  10. floodmodeller_api/libs/libifcoremd.dll +0 -0
  11. floodmodeller_api/libs/libifcoremt.so.5 +0 -0
  12. floodmodeller_api/libs/libifport.so.5 +0 -0
  13. floodmodeller_api/{libmmd.dll → libs/libimf.so} +0 -0
  14. floodmodeller_api/libs/libintlc.so.5 +0 -0
  15. floodmodeller_api/libs/libmmd.dll +0 -0
  16. floodmodeller_api/libs/libsvml.so +0 -0
  17. floodmodeller_api/libs/libzzn_read.so +0 -0
  18. floodmodeller_api/libs/zzn_read.dll +0 -0
  19. floodmodeller_api/logs/__init__.py +2 -2
  20. floodmodeller_api/logs/lf.py +364 -312
  21. floodmodeller_api/logs/lf_helpers.py +354 -352
  22. floodmodeller_api/logs/lf_params.py +643 -529
  23. floodmodeller_api/mapping.py +84 -0
  24. floodmodeller_api/test/__init__.py +4 -4
  25. floodmodeller_api/test/conftest.py +16 -8
  26. floodmodeller_api/test/test_backup.py +117 -117
  27. floodmodeller_api/test/test_conveyance.py +107 -0
  28. floodmodeller_api/test/test_dat.py +222 -92
  29. floodmodeller_api/test/test_data/All Units 4_6.DAT +1081 -1081
  30. floodmodeller_api/test/test_data/All Units 4_6.feb +1081 -1081
  31. floodmodeller_api/test/test_data/BRIDGE.DAT +926 -926
  32. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.dat +36 -36
  33. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.feb +36 -36
  34. floodmodeller_api/test/test_data/DamBreakADI.xml +52 -52
  35. floodmodeller_api/test/test_data/DamBreakFAST.xml +58 -58
  36. floodmodeller_api/test/test_data/DamBreakFAST_dy.xml +53 -53
  37. floodmodeller_api/test/test_data/DamBreakTVD.xml +55 -55
  38. floodmodeller_api/test/test_data/DefenceBreach.xml +53 -53
  39. floodmodeller_api/test/test_data/DefenceBreachFAST.xml +60 -60
  40. floodmodeller_api/test/test_data/DefenceBreachFAST_dy.xml +55 -55
  41. floodmodeller_api/test/test_data/Domain1+2_QH.xml +76 -76
  42. floodmodeller_api/test/test_data/Domain1_H.xml +41 -41
  43. floodmodeller_api/test/test_data/Domain1_Q.xml +41 -41
  44. floodmodeller_api/test/test_data/Domain1_Q_FAST.xml +48 -48
  45. floodmodeller_api/test/test_data/Domain1_Q_FAST_dy.xml +48 -48
  46. floodmodeller_api/test/test_data/Domain1_Q_xml_expected.json +263 -0
  47. floodmodeller_api/test/test_data/Domain1_W.xml +41 -41
  48. floodmodeller_api/test/test_data/EX1.DAT +321 -321
  49. floodmodeller_api/test/test_data/EX1.ext +107 -107
  50. floodmodeller_api/test/test_data/EX1.feb +320 -320
  51. floodmodeller_api/test/test_data/EX1.gxy +107 -107
  52. floodmodeller_api/test/test_data/EX17.DAT +421 -422
  53. floodmodeller_api/test/test_data/EX17.ext +213 -213
  54. floodmodeller_api/test/test_data/EX17.feb +422 -422
  55. floodmodeller_api/test/test_data/EX18.DAT +375 -375
  56. floodmodeller_api/test/test_data/EX18_DAT_expected.json +3876 -0
  57. floodmodeller_api/test/test_data/EX2.DAT +302 -302
  58. floodmodeller_api/test/test_data/EX3.DAT +926 -926
  59. floodmodeller_api/test/test_data/EX3_DAT_expected.json +16235 -0
  60. floodmodeller_api/test/test_data/EX3_IEF_expected.json +61 -0
  61. floodmodeller_api/test/test_data/EX6.DAT +2084 -2084
  62. floodmodeller_api/test/test_data/EX6.ext +532 -532
  63. floodmodeller_api/test/test_data/EX6.feb +2084 -2084
  64. floodmodeller_api/test/test_data/EX6_DAT_expected.json +31647 -0
  65. floodmodeller_api/test/test_data/Event Data Example.DAT +336 -336
  66. floodmodeller_api/test/test_data/Event Data Example.ext +107 -107
  67. floodmodeller_api/test/test_data/Event Data Example.feb +336 -336
  68. floodmodeller_api/test/test_data/Linked1D2D.xml +52 -52
  69. floodmodeller_api/test/test_data/Linked1D2DFAST.xml +53 -53
  70. floodmodeller_api/test/test_data/Linked1D2DFAST_dy.xml +48 -48
  71. floodmodeller_api/test/test_data/Linked1D2D_xml_expected.json +313 -0
  72. floodmodeller_api/test/test_data/blockage.dat +50 -50
  73. floodmodeller_api/test/test_data/blockage.ext +45 -45
  74. floodmodeller_api/test/test_data/blockage.feb +9 -9
  75. floodmodeller_api/test/test_data/blockage.gxy +71 -71
  76. floodmodeller_api/test/test_data/conveyance_test.dat +165 -0
  77. floodmodeller_api/test/test_data/conveyance_test.feb +116 -0
  78. floodmodeller_api/test/test_data/conveyance_test.gxy +85 -0
  79. floodmodeller_api/test/test_data/defaultUnits.dat +127 -127
  80. floodmodeller_api/test/test_data/defaultUnits.ext +45 -45
  81. floodmodeller_api/test/test_data/defaultUnits.feb +9 -9
  82. floodmodeller_api/test/test_data/defaultUnits.fmpx +58 -58
  83. floodmodeller_api/test/test_data/defaultUnits.gxy +85 -85
  84. floodmodeller_api/test/test_data/ex3.ief +20 -20
  85. floodmodeller_api/test/test_data/ex3.lf1 +2800 -2800
  86. floodmodeller_api/test/test_data/ex4.DAT +1374 -1374
  87. floodmodeller_api/test/test_data/ex4_changed.DAT +1374 -1374
  88. floodmodeller_api/test/test_data/example1.inp +329 -329
  89. floodmodeller_api/test/test_data/example2.inp +158 -158
  90. floodmodeller_api/test/test_data/example3.inp +297 -297
  91. floodmodeller_api/test/test_data/example4.inp +388 -388
  92. floodmodeller_api/test/test_data/example5.inp +147 -147
  93. floodmodeller_api/test/test_data/example6.inp +154 -154
  94. floodmodeller_api/test/test_data/expected_conveyance.csv +60 -0
  95. floodmodeller_api/test/test_data/jump.dat +176 -176
  96. floodmodeller_api/test/test_data/network.dat +1374 -1374
  97. floodmodeller_api/test/test_data/network.ext +45 -45
  98. floodmodeller_api/test/test_data/network.exy +1 -1
  99. floodmodeller_api/test/test_data/network.feb +45 -45
  100. floodmodeller_api/test/test_data/network.ied +45 -45
  101. floodmodeller_api/test/test_data/network.ief +20 -20
  102. floodmodeller_api/test/test_data/network.inp +147 -147
  103. floodmodeller_api/test/test_data/network.pxy +57 -57
  104. floodmodeller_api/test/test_data/network.zzd +122 -122
  105. floodmodeller_api/test/test_data/network_dat_expected.json +21837 -0
  106. floodmodeller_api/test/test_data/network_from_tabularCSV.csv +87 -87
  107. floodmodeller_api/test/test_data/network_ied_expected.json +287 -0
  108. floodmodeller_api/test/test_data/rnweir.dat +9 -9
  109. floodmodeller_api/test/test_data/rnweir.ext +45 -45
  110. floodmodeller_api/test/test_data/rnweir.feb +9 -9
  111. floodmodeller_api/test/test_data/rnweir.gxy +45 -45
  112. floodmodeller_api/test/test_data/rnweir_default.dat +74 -74
  113. floodmodeller_api/test/test_data/rnweir_default.ext +45 -45
  114. floodmodeller_api/test/test_data/rnweir_default.feb +9 -9
  115. floodmodeller_api/test/test_data/rnweir_default.fmpx +58 -58
  116. floodmodeller_api/test/test_data/rnweir_default.gxy +53 -53
  117. floodmodeller_api/test/test_data/unit checks.dat +16 -16
  118. floodmodeller_api/test/test_ied.py +29 -29
  119. floodmodeller_api/test/test_ief.py +136 -24
  120. floodmodeller_api/test/test_inp.py +47 -48
  121. floodmodeller_api/test/test_json.py +114 -0
  122. floodmodeller_api/test/test_logs_lf.py +102 -51
  123. floodmodeller_api/test/test_tool.py +165 -152
  124. floodmodeller_api/test/test_toolbox_structure_log.py +234 -239
  125. floodmodeller_api/test/test_xml2d.py +151 -156
  126. floodmodeller_api/test/test_zzn.py +36 -34
  127. floodmodeller_api/to_from_json.py +230 -0
  128. floodmodeller_api/tool.py +332 -329
  129. floodmodeller_api/toolbox/__init__.py +5 -5
  130. floodmodeller_api/toolbox/example_tool.py +45 -45
  131. floodmodeller_api/toolbox/model_build/__init__.py +2 -2
  132. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +100 -98
  133. floodmodeller_api/toolbox/model_build/structure_log/__init__.py +1 -1
  134. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +287 -289
  135. floodmodeller_api/toolbox/model_build/structure_log_definition.py +76 -76
  136. floodmodeller_api/units/__init__.py +10 -10
  137. floodmodeller_api/units/_base.py +214 -212
  138. floodmodeller_api/units/boundaries.py +467 -467
  139. floodmodeller_api/units/comment.py +52 -55
  140. floodmodeller_api/units/conduits.py +382 -402
  141. floodmodeller_api/units/conveyance.py +301 -0
  142. floodmodeller_api/units/helpers.py +123 -131
  143. floodmodeller_api/units/iic.py +107 -101
  144. floodmodeller_api/units/losses.py +305 -306
  145. floodmodeller_api/units/sections.py +465 -446
  146. floodmodeller_api/units/structures.py +1690 -1683
  147. floodmodeller_api/units/units.py +93 -104
  148. floodmodeller_api/units/unsupported.py +44 -44
  149. floodmodeller_api/units/variables.py +87 -89
  150. floodmodeller_api/urban1d/__init__.py +11 -11
  151. floodmodeller_api/urban1d/_base.py +188 -179
  152. floodmodeller_api/urban1d/conduits.py +93 -85
  153. floodmodeller_api/urban1d/general_parameters.py +58 -58
  154. floodmodeller_api/urban1d/junctions.py +81 -79
  155. floodmodeller_api/urban1d/losses.py +81 -74
  156. floodmodeller_api/urban1d/outfalls.py +114 -110
  157. floodmodeller_api/urban1d/raingauges.py +111 -111
  158. floodmodeller_api/urban1d/subsections.py +92 -98
  159. floodmodeller_api/urban1d/xsections.py +147 -144
  160. floodmodeller_api/util.py +119 -21
  161. floodmodeller_api/validation/parameters.py +660 -660
  162. floodmodeller_api/validation/urban_parameters.py +388 -404
  163. floodmodeller_api/validation/validation.py +110 -108
  164. floodmodeller_api/version.py +1 -1
  165. floodmodeller_api/xml2d.py +632 -673
  166. floodmodeller_api/xml2d_template.py +37 -37
  167. floodmodeller_api/zzn.py +414 -363
  168. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/LICENSE.txt +13 -13
  169. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/METADATA +85 -82
  170. floodmodeller_api-0.4.4.dist-info/RECORD +185 -0
  171. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/WHEEL +1 -1
  172. floodmodeller_api/libifcoremd.dll +0 -0
  173. floodmodeller_api/test/test_data/EX3.bmp +0 -0
  174. floodmodeller_api/test/test_data/test_output.csv +0 -87
  175. floodmodeller_api/zzn_read.dll +0 -0
  176. floodmodeller_api-0.4.2.post1.dist-info/RECORD +0 -164
  177. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/entry_points.txt +0 -0
  178. {floodmodeller_api-0.4.2.post1.dist-info → floodmodeller_api-0.4.4.dist-info}/top_level.txt +0 -0
@@ -1,352 +1,354 @@
1
- """
2
- Flood Modeller Python API
3
- Copyright (C) 2023 Jacobs U.K. Limited
4
-
5
- This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
- as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
-
8
- This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
-
11
- You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
-
13
- If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
- address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
- """
16
-
17
- import datetime as dt
18
- from abc import ABC, abstractmethod
19
- from typing import List, Optional, Union
20
-
21
- import pandas as pd
22
-
23
-
24
- class Data(ABC):
25
- def __init__(self, header: str, subheaders: Optional[list]):
26
- self.header = header
27
- self.no_values = 0
28
- self._subheaders = subheaders
29
-
30
- @abstractmethod
31
- def update(self):
32
- pass
33
-
34
- @abstractmethod
35
- def get_value(self):
36
- pass
37
-
38
-
39
- class LastData(Data):
40
- def __init__(self, header: str, subheaders: Optional[list]):
41
- super().__init__(header, subheaders)
42
- self._value = None # single value
43
-
44
- def update(self, data):
45
- self._value = data
46
- self.no_values = 1
47
-
48
- def get_value(self, index_key=None, index_df=None):
49
- return self._value
50
-
51
-
52
- class AllData(Data):
53
- def __init__(self, header: str, subheaders: Optional[list]):
54
- super().__init__(header, subheaders)
55
- self._value: List[object] = [] # list
56
-
57
- def update(self, data):
58
- self._value.append(data)
59
- self.no_values += 1
60
-
61
- def get_value(
62
- self, index_key: Optional[str] = None, index_df: Optional[pd.Series] = None
63
- ) -> pd.DataFrame:
64
- df = pd.DataFrame(self._value)
65
-
66
- # do nothing to empty dataframes
67
- if df.empty:
68
- return df
69
-
70
- # overall header
71
- if self._subheaders is None:
72
- df.rename(columns={df.columns[0]: self.header}, inplace=True)
73
-
74
- elif index_key is not None:
75
- # subheaders
76
- df = df.set_axis(self._subheaders, axis=1)
77
-
78
- # remove duplicate of index
79
- # sometimes it includes extra values
80
- # it also has different precision
81
- index_duplicate = index_key + "_duplicate"
82
- if index_duplicate in df.columns:
83
- index_df = df[index_duplicate].dt.round("1s")
84
-
85
- df.drop(index_duplicate, axis=1, inplace=True)
86
- # self._subheaders.remove(index_duplicate)
87
-
88
- # there is no index because *this* is the index
89
- if index_key is None:
90
- return df
91
-
92
- # made lf index the dataframe index
93
- df[index_key] = index_df
94
- df.dropna(inplace=True)
95
- df.drop_duplicates(subset=index_key, keep="last", inplace=True)
96
- df.set_index(index_key, inplace=True)
97
-
98
- return df
99
-
100
-
101
- def data_factory(data_type: str, header: str, subheaders: Optional[list] = None):
102
- if data_type == "last":
103
- return LastData(header, subheaders)
104
- if data_type == "all":
105
- return AllData(header, subheaders)
106
- raise ValueError(f'Unexpected data "{data_type}"')
107
-
108
-
109
- class State(ABC):
110
- def __init__(self, extracted_data):
111
- pass
112
-
113
- @abstractmethod
114
- def report_progress(self):
115
- pass
116
-
117
-
118
- class UnsteadyState(State):
119
- def __init__(self, extracted_data):
120
- self._progress_data = extracted_data["progress"].data
121
-
122
- def report_progress(self) -> float:
123
- """Returns last progress percentage"""
124
-
125
- progress = self._progress_data.get_value()
126
-
127
- if progress is None:
128
- return 0
129
-
130
- return progress
131
-
132
-
133
- class SteadyState(State):
134
- def report_progress(self):
135
- raise NotImplementedError("No progress reporting for steady simulations")
136
-
137
-
138
- def state_factory(steady: bool, extracted_data: Data) -> State:
139
- if steady is True:
140
- return SteadyState(extracted_data)
141
- return UnsteadyState(extracted_data)
142
-
143
-
144
- class Parser(ABC):
145
- """Abstract base class for processing and storing different types of line
146
-
147
- Args:
148
- name
149
- prefix
150
- data_type
151
- exclude
152
- is_index
153
- before_index
154
- """
155
-
156
- _nan: object
157
-
158
- def __init__(
159
- self,
160
- name: str,
161
- prefix: str,
162
- data_type: str,
163
- exclude: Optional[str] = None,
164
- is_index: Optional[bool] = False,
165
- before_index: Optional[bool] = False,
166
- ):
167
- self._name = name
168
-
169
- self.prefix = prefix
170
- self.is_index = is_index
171
- self.before_index = before_index
172
-
173
- self._exclude = exclude
174
-
175
- self.no_values = 0
176
-
177
- self.data_type = data_type
178
- self.data = data_factory(data_type, name)
179
-
180
- def process_line(self, raw_line: str) -> None:
181
- """self._process_line with exception handling of expected nan values"""
182
-
183
- try:
184
- processed_line = self._process_line(raw_line)
185
-
186
- except ValueError as e:
187
- if self._exclude and raw_line in self._exclude:
188
- processed_line = self._nan
189
- else:
190
- raise e
191
-
192
- self.data.update(processed_line)
193
-
194
- @abstractmethod
195
- def _process_line(self, raw: str) -> object:
196
- """Converts string to meaningful data"""
197
-
198
-
199
- class DateTimeParser(Parser):
200
- """Extra argument from superclass code: str"""
201
-
202
- def __init__(self, *args, **kwargs):
203
- self._code = kwargs.pop("code")
204
- super().__init__(*args, **kwargs)
205
- self._nan = pd.NaT
206
-
207
- def _process_line(self, raw: str) -> dt.datetime:
208
- """Converts string to datetime"""
209
-
210
- processed = dt.datetime.strptime(raw, self._code)
211
-
212
- return processed
213
-
214
-
215
- class TimeParser(Parser):
216
- """Extra argument from superclass code: str"""
217
-
218
- def __init__(self, *args, **kwargs):
219
- self._code = kwargs.pop("code")
220
- super().__init__(*args, **kwargs)
221
- self._nan = pd.NaT
222
-
223
- def _process_line(self, raw: str) -> dt.time:
224
- """Converts string to time"""
225
-
226
- raw, _, _ = raw.partition(" ") # Temp fix to ignore '(+n d)' in EFT
227
- processed = dt.datetime.strptime(raw, self._code).time()
228
-
229
- return processed
230
-
231
-
232
- class TimeDeltaHMSParser(Parser):
233
- def __init__(self, *args, **kwargs):
234
- super().__init__(*args, **kwargs)
235
- self._nan = pd.NaT
236
-
237
- def _process_line(self, raw: str) -> dt.timedelta:
238
- """Converts string HH:MM:SS to timedelta"""
239
-
240
- h, m, s = raw.split(":")
241
- processed = dt.timedelta(hours=int(h), minutes=int(m), seconds=int(s))
242
-
243
- return processed
244
-
245
-
246
- class TimeDeltaHParser(Parser):
247
- def __init__(self, *args, **kwargs):
248
- super().__init__(*args, **kwargs)
249
- self._nan = pd.NaT
250
-
251
- def _process_line(self, raw: str) -> dt.timedelta:
252
- """Converts string H (with decimal place and "hrs") to timedelta"""
253
-
254
- h = raw.split("hrs")[0]
255
- processed = dt.timedelta(hours=float(h))
256
-
257
- return processed
258
-
259
-
260
- class TimeDeltaSParser(Parser):
261
- def __init__(self, *args, **kwargs):
262
- super().__init__(*args, **kwargs)
263
- self._nan = pd.NaT
264
-
265
- def _process_line(self, raw: str) -> dt.timedelta:
266
- """Converts string S (with decimal place and "s") to timedelta"""
267
-
268
- s = raw.split("s")[0] # TODO: not necessary for simulation time
269
- processed = dt.timedelta(seconds=float(s))
270
-
271
- return processed
272
-
273
-
274
- class FloatParser(Parser):
275
- def __init__(self, *args, **kwargs):
276
- super().__init__(*args, **kwargs)
277
- self._nan = float("nan")
278
-
279
- def _process_line(self, raw: str) -> float:
280
- """Converts string to float"""
281
-
282
- processed = float(raw)
283
-
284
- return processed
285
-
286
-
287
- class FloatSplitParser(Parser):
288
- """Extra argument from superclass split: list"""
289
-
290
- def __init__(self, *args, **kwargs):
291
- self._split = kwargs.pop("split")
292
- super().__init__(*args, **kwargs)
293
- self._nan = float("nan")
294
-
295
- def _process_line(self, raw: str) -> float:
296
- """Converts string to float, removing everything after split"""
297
-
298
- processed = float(raw.split(self._split)[0])
299
-
300
- return processed
301
-
302
-
303
- class StringParser(Parser):
304
- def __init__(self, *args, **kwargs):
305
- super().__init__(*args, **kwargs)
306
- self._nan = ""
307
-
308
- def _process_line(self, raw: str) -> str:
309
- """No conversion necessary"""
310
-
311
- processed = raw
312
-
313
- return processed
314
-
315
-
316
- class StringSplitParser(Parser):
317
- """Extra argument from superclass split: str"""
318
-
319
- def __init__(self, *args, **kwargs):
320
- self._split = kwargs.pop("split")
321
- super().__init__(*args, **kwargs)
322
- self._nan = ""
323
-
324
- def _process_line(self, raw: str) -> str:
325
- """Removes everything after split"""
326
-
327
- processed = raw.split(self._split)[0]
328
-
329
- return processed
330
-
331
-
332
- class TimeFloatMultParser(Parser):
333
- """Extra argument from superclass names: list"""
334
-
335
- def __init__(self, *args, **kwargs):
336
- self._subheaders = kwargs.pop("subheaders")
337
- super().__init__(*args, **kwargs)
338
-
339
- self._nan = []
340
- for _ in self._subheaders:
341
- self._nan.append(float("nan"))
342
-
343
- self.data = data_factory(self.data_type, self._name, self._subheaders) # overwrite
344
-
345
- def _process_line(self, raw: str) -> List[Union[dt.timedelta, float]]:
346
- """Converts string to list of one timedelta and then floats"""
347
-
348
- as_float = [float(x) for x in raw.split()]
349
- first_as_timedelta = dt.timedelta(hours=float(as_float[0]))
350
- processed = [first_as_timedelta] + as_float[1:]
351
-
352
- return processed
1
+ """
2
+ Flood Modeller Python API
3
+ Copyright (C) 2024 Jacobs U.K. Limited
4
+
5
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
+
8
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
+
11
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
+
13
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import datetime as dt
20
+ from abc import ABC, abstractmethod
21
+
22
+ import pandas as pd
23
+
24
+
25
class Data(ABC):
    """Base container for values parsed out of a simulation log.

    Subclasses choose a retention strategy: keep every value seen, or
    only the most recent one.
    """

    def __init__(self, header: str, subheaders: list | None):
        # Column name used when the data is presented as a dataframe.
        self.header = header
        # Count of values stored so far.
        self.no_values = 0
        # Optional per-column names for multi-value entries.
        self._subheaders = subheaders

    @abstractmethod
    def update(self):
        """Store a newly parsed value."""

    @abstractmethod
    def get_value(self):
        """Return the stored value(s)."""
38
+
39
+
40
class LastData(Data):
    """Data container that retains only the most recently parsed value."""

    def __init__(self, header: str, subheaders: list | None):
        super().__init__(header, subheaders)
        # Holds a single value; None until the first update.
        self._value = None

    def update(self, data):
        """Overwrite any previously stored value with the newest one."""
        self.no_values = 1
        self._value = data

    def get_value(self, index_key=None, index_df=None):
        """Return the latest value (the index arguments are ignored)."""
        return self._value
51
+
52
+
53
class AllData(Data):
    """Data container that accumulates every parsed value.

    ``get_value`` assembles the accumulated values into a DataFrame,
    optionally labelled with subheaders and indexed by a separately
    extracted index series.
    """

    def __init__(self, header: str, subheaders: list | None):
        super().__init__(header, subheaders)
        # Every update is appended here; one entry per processed line.
        self._value: list[object] = []

    def update(self, data):
        """Append a newly parsed value and bump the counter."""
        self._value.append(data)
        self.no_values += 1

    def get_value(
        self,
        index_key: str | None = None,
        index_df: pd.Series | None = None,
    ) -> pd.DataFrame:
        """Return accumulated values as a DataFrame.

        Args:
            index_key: Name of the column to use as the index; None when
                this data IS the index itself.
            index_df: Externally extracted index values aligned by row
                position — TODO confirm alignment assumption with callers.

        Returns:
            DataFrame of the stored values; empty if nothing was stored.
        """
        df = pd.DataFrame(self._value)

        # do nothing to empty dataframes
        if df.empty:
            return df

        # overall header
        if self._subheaders is None:
            df.rename(columns={df.columns[0]: self.header}, inplace=True)

        elif index_key is not None:
            # subheaders
            df = df.set_axis(self._subheaders, axis=1)

            # remove duplicate of index
            # sometimes it includes extra values
            # it also has different precision
            index_duplicate = index_key + "_duplicate"
            if index_duplicate in df.columns:
                try:
                    # Rounded duplicate replaces the supplied index because
                    # it is row-aligned with this data's extra values.
                    index_df = df[index_duplicate].dt.round("1s")
                    df.drop(index_duplicate, axis=1, inplace=True)
                except AttributeError:
                    # Column is not datetime-like (.dt unavailable): just
                    # discard it and keep the caller-supplied index.
                    df = df.drop(columns=index_duplicate)

        # there is no index because *this* is the index
        if index_key is None:
            return df

        # made lf index the dataframe index
        df[index_key] = index_df
        df.dropna(inplace=True)
        df.drop_duplicates(subset=index_key, keep="last", inplace=True)
        df.set_index(index_key, inplace=True)

        return df
103
+
104
+
105
def data_factory(data_type: str, header: str, subheaders: list | None = None):
    """Build a Data container for the requested retention strategy.

    Args:
        data_type: "last" keeps only the newest value, "all" keeps every value.
        header: Column/header name for the stored data.
        subheaders: Optional per-column names for multi-value entries.

    Raises:
        ValueError: If ``data_type`` is not "last" or "all".
    """
    if data_type not in ("last", "all"):
        raise ValueError(f'Unexpected data "{data_type}"')
    container_cls = LastData if data_type == "last" else AllData
    return container_cls(header, subheaders)
111
+
112
+
113
class State(ABC):
    """Interface for reporting on a simulation run's state."""

    @abstractmethod
    def __init__(self, extracted_data):
        """Subclasses decide what (if anything) to keep from the data."""

    @abstractmethod
    def report_progress(self):
        """Report how far the simulation has progressed."""
121
+
122
+
123
class UnsteadyState(State):
    """State for unsteady simulations, which report percentage progress."""

    def __init__(self, extracted_data):
        # Keep a handle on the "progress" parser's data container.
        self._progress_data = extracted_data["progress"].data

    def report_progress(self) -> float:
        """Returns last progress percentage"""
        latest = self._progress_data.get_value()
        # No value stored yet means the run has not progressed.
        return 0 if latest is None else latest
136
+
137
+
138
class SteadyState(State):
    """State for steady simulations, which produce no progress data."""

    def __init__(self, extracted_data):
        # Steady runs carry no progress information, so nothing is stored.
        pass

    def report_progress(self):
        """Always raises: steady runs have no progress percentage."""
        raise NotImplementedError("No progress reporting for steady simulations")
144
+
145
+
146
def state_factory(steady: bool, extracted_data: Data) -> State:
    """Build the State matching the simulation type (steady/unsteady)."""
    state_cls = SteadyState if steady is True else UnsteadyState
    return state_cls(extracted_data)
150
+
151
+
152
class Parser(ABC):
    """Abstract base class for processing and storing different types of line

    Args:
        name: Header name given to the stored data.
        prefix: Literal prefix identifying this line type.
        data_type: Retention strategy, "last" or "all" (see ``data_factory``).
        exclude: Optional value(s) whose failed parse is treated as an
            expected NaN rather than an error.
        is_index: Whether this parser supplies the index for the data.
        before_index: Whether this line type appears before the index line.
    """

    # Subclasses set the value substituted for expected unparsable lines.
    _nan: object

    def __init__(  # noqa: PLR0913
        self,
        name: str,
        prefix: str,
        data_type: str,
        exclude: str | None = None,
        is_index: bool | None = False,
        before_index: bool | None = False,
    ):
        self._name = name

        self.prefix = prefix
        self.is_index = is_index
        self.before_index = before_index

        self._exclude = exclude

        self.no_values = 0

        self.data_type = data_type
        self.data = data_factory(data_type, name)

    def process_line(self, raw_line: str) -> None:
        """self._process_line with exception handling of expected nan values"""

        try:
            processed_line = self._process_line(raw_line)

        except ValueError:
            # NOTE(review): ``in`` is a substring test when exclude is a
            # plain string, so substrings of exclude also count — confirm
            # this is intended.
            if self._exclude and raw_line in self._exclude:
                processed_line = self._nan
            else:
                # Bare raise re-raises the original exception without
                # appending an extra frame to its traceback.
                raise

        self.data.update(processed_line)

    @abstractmethod
    def _process_line(self, raw: str) -> object:
        """Converts string to meaningful data"""
205
+
206
+
207
class DateTimeParser(Parser):
    """Extra argument from superclass code: str"""

    def __init__(self, *args, **kwargs):
        # ``code`` is the strptime format used to interpret the raw text.
        self._code = kwargs.pop("code")
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.datetime:
        """Converts string to datetime"""
        parsed = dt.datetime.strptime(raw, self._code)
        return parsed
218
+
219
+
220
class TimeParser(Parser):
    """Extra argument from superclass code: str"""

    def __init__(self, *args, **kwargs):
        # ``code`` is the strptime format used to interpret the raw text.
        self._code = kwargs.pop("code")
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.time:
        """Converts string to time"""
        # Temp fix: keep only the text before the first space so trailing
        # markers such as '(+n d)' in EFT are ignored.
        head, _, _ = raw.partition(" ")
        return dt.datetime.strptime(head, self._code).time()
233
+
234
+
235
class TimeDeltaHMSParser(Parser):
    """Parses elapsed times written as HH:MM:SS."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.timedelta:
        """Converts string HH:MM:SS to timedelta"""
        hours, minutes, seconds = (int(part) for part in raw.split(":"))
        return dt.timedelta(hours=hours, minutes=minutes, seconds=seconds)
245
+
246
+
247
class TimeDeltaHParser(Parser):
    """Parses elapsed times written as decimal hours, e.g. '1.5hrs'."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.timedelta:
        """Converts string H (with decimal place and "hrs") to timedelta"""
        hours_text = raw.partition("hrs")[0]
        return dt.timedelta(hours=float(hours_text))
257
+
258
+
259
class TimeDeltaSParser(Parser):
    """Parses elapsed times written as decimal seconds, e.g. '2.5s'."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.timedelta:
        """Converts string S (with decimal place and "s") to timedelta"""
        # TODO: stripping the "s" suffix is not necessary for simulation time
        seconds_text = raw.partition("s")[0]
        return dt.timedelta(seconds=float(seconds_text))
269
+
270
+
271
class FloatParser(Parser):
    """Parses a plain floating-point value."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._nan = float("nan")

    def _process_line(self, raw: str) -> float:
        """Converts string to float"""
        value = float(raw)
        return value
280
+
281
+
282
class FloatSplitParser(Parser):
    """Extra argument from superclass split: list"""

    def __init__(self, *args, **kwargs):
        # ``split`` marks where the numeric part of the line ends.
        self._split = kwargs.pop("split")
        super().__init__(*args, **kwargs)
        self._nan = float("nan")

    def _process_line(self, raw: str) -> float:
        """Converts string to float, removing everything after split"""
        numeric_text = raw.split(self._split)[0]
        return float(numeric_text)
294
+
295
+
296
class StringParser(Parser):
    """Stores the raw line text unchanged."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # An empty string stands in for expected unparsable lines.
        self._nan = ""

    def _process_line(self, raw: str) -> str:
        """No conversion necessary"""
        return raw
305
+
306
+
307
class StringSplitParser(Parser):
    """Extra argument from superclass split: str"""

    def __init__(self, *args, **kwargs):
        # ``split`` marks where the relevant part of the line ends.
        self._split = kwargs.pop("split")
        super().__init__(*args, **kwargs)
        self._nan = ""

    def _process_line(self, raw: str) -> str:
        """Removes everything after split"""
        kept, *_ = raw.split(self._split)
        return kept
319
+
320
+
321
class TimeFloatMultParser(Parser):
    """Extra argument from superclass names: list"""

    def __init__(self, *args, **kwargs):
        self._subheaders = kwargs.pop("subheaders")
        super().__init__(*args, **kwargs)

        # One NaN per column so an excluded line keeps the row width.
        self._nan = [float("nan") for _ in self._subheaders]

        # Rebuild the container so it knows about the subheaders.
        self.data = data_factory(self.data_type, self._name, self._subheaders)

    def _process_line(self, raw: str) -> list[dt.timedelta | float]:
        """Converts string to list of one timedelta and then floats"""
        values = [float(token) for token in raw.split()]
        lead_time = dt.timedelta(hours=values[0])
        return [lead_time, *values[1:]]
340
+
341
+
342
class TimeSplitParser(Parser):
    """Extra argument from superclass code: str, split: str"""

    def __init__(self, *args, **kwargs):
        # ``code`` is the strptime format; ``split`` ends the time text.
        self._code = kwargs.pop("code")
        self._split = kwargs.pop("split")
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.time:
        """Converts string to time, removing everything after split"""
        time_text = raw.split(self._split)[0].strip()
        return dt.datetime.strptime(time_text, self._code).time()