floodmodeller-api 0.4.2__py3-none-any.whl → 0.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178)
  1. floodmodeller_api/__init__.py +8 -9
  2. floodmodeller_api/_base.py +184 -176
  3. floodmodeller_api/backup.py +273 -273
  4. floodmodeller_api/dat.py +909 -838
  5. floodmodeller_api/diff.py +136 -119
  6. floodmodeller_api/ied.py +307 -311
  7. floodmodeller_api/ief.py +647 -646
  8. floodmodeller_api/ief_flags.py +253 -253
  9. floodmodeller_api/inp.py +266 -268
  10. floodmodeller_api/libs/libifcoremd.dll +0 -0
  11. floodmodeller_api/libs/libifcoremt.so.5 +0 -0
  12. floodmodeller_api/libs/libifport.so.5 +0 -0
  13. floodmodeller_api/{libmmd.dll → libs/libimf.so} +0 -0
  14. floodmodeller_api/libs/libintlc.so.5 +0 -0
  15. floodmodeller_api/libs/libmmd.dll +0 -0
  16. floodmodeller_api/libs/libsvml.so +0 -0
  17. floodmodeller_api/libs/libzzn_read.so +0 -0
  18. floodmodeller_api/libs/zzn_read.dll +0 -0
  19. floodmodeller_api/logs/__init__.py +2 -2
  20. floodmodeller_api/logs/lf.py +320 -314
  21. floodmodeller_api/logs/lf_helpers.py +354 -346
  22. floodmodeller_api/logs/lf_params.py +643 -529
  23. floodmodeller_api/mapping.py +84 -0
  24. floodmodeller_api/test/__init__.py +4 -4
  25. floodmodeller_api/test/conftest.py +9 -8
  26. floodmodeller_api/test/test_backup.py +117 -117
  27. floodmodeller_api/test/test_dat.py +221 -92
  28. floodmodeller_api/test/test_data/All Units 4_6.DAT +1081 -1081
  29. floodmodeller_api/test/test_data/All Units 4_6.feb +1081 -1081
  30. floodmodeller_api/test/test_data/BRIDGE.DAT +926 -926
  31. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.dat +36 -36
  32. floodmodeller_api/test/test_data/Culvert_Inlet_Outlet.feb +36 -36
  33. floodmodeller_api/test/test_data/DamBreakADI.xml +52 -52
  34. floodmodeller_api/test/test_data/DamBreakFAST.xml +58 -58
  35. floodmodeller_api/test/test_data/DamBreakFAST_dy.xml +53 -53
  36. floodmodeller_api/test/test_data/DamBreakTVD.xml +55 -55
  37. floodmodeller_api/test/test_data/DefenceBreach.xml +53 -53
  38. floodmodeller_api/test/test_data/DefenceBreachFAST.xml +60 -60
  39. floodmodeller_api/test/test_data/DefenceBreachFAST_dy.xml +55 -55
  40. floodmodeller_api/test/test_data/Domain1+2_QH.xml +76 -76
  41. floodmodeller_api/test/test_data/Domain1_H.xml +41 -41
  42. floodmodeller_api/test/test_data/Domain1_Q.xml +41 -41
  43. floodmodeller_api/test/test_data/Domain1_Q_FAST.xml +48 -48
  44. floodmodeller_api/test/test_data/Domain1_Q_FAST_dy.xml +48 -48
  45. floodmodeller_api/test/test_data/Domain1_Q_xml_expected.json +263 -0
  46. floodmodeller_api/test/test_data/Domain1_W.xml +41 -41
  47. floodmodeller_api/test/test_data/EX1.DAT +321 -321
  48. floodmodeller_api/test/test_data/EX1.ext +107 -107
  49. floodmodeller_api/test/test_data/EX1.feb +320 -320
  50. floodmodeller_api/test/test_data/EX1.gxy +107 -107
  51. floodmodeller_api/test/test_data/EX17.DAT +421 -422
  52. floodmodeller_api/test/test_data/EX17.ext +213 -213
  53. floodmodeller_api/test/test_data/EX17.feb +422 -422
  54. floodmodeller_api/test/test_data/EX18.DAT +375 -375
  55. floodmodeller_api/test/test_data/EX18_DAT_expected.json +3876 -0
  56. floodmodeller_api/test/test_data/EX2.DAT +302 -302
  57. floodmodeller_api/test/test_data/EX3.DAT +926 -926
  58. floodmodeller_api/test/test_data/EX3_DAT_expected.json +16235 -0
  59. floodmodeller_api/test/test_data/EX3_IEF_expected.json +61 -0
  60. floodmodeller_api/test/test_data/EX6.DAT +2084 -2084
  61. floodmodeller_api/test/test_data/EX6.ext +532 -532
  62. floodmodeller_api/test/test_data/EX6.feb +2084 -2084
  63. floodmodeller_api/test/test_data/EX6_DAT_expected.json +31647 -0
  64. floodmodeller_api/test/test_data/Event Data Example.DAT +336 -336
  65. floodmodeller_api/test/test_data/Event Data Example.ext +107 -107
  66. floodmodeller_api/test/test_data/Event Data Example.feb +336 -336
  67. floodmodeller_api/test/test_data/Linked1D2D.xml +52 -52
  68. floodmodeller_api/test/test_data/Linked1D2DFAST.xml +53 -53
  69. floodmodeller_api/test/test_data/Linked1D2DFAST_dy.xml +48 -48
  70. floodmodeller_api/test/test_data/Linked1D2D_xml_expected.json +313 -0
  71. floodmodeller_api/test/test_data/blockage.dat +50 -50
  72. floodmodeller_api/test/test_data/blockage.ext +45 -45
  73. floodmodeller_api/test/test_data/blockage.feb +9 -9
  74. floodmodeller_api/test/test_data/blockage.gxy +71 -71
  75. floodmodeller_api/test/test_data/defaultUnits.dat +127 -127
  76. floodmodeller_api/test/test_data/defaultUnits.ext +45 -45
  77. floodmodeller_api/test/test_data/defaultUnits.feb +9 -9
  78. floodmodeller_api/test/test_data/defaultUnits.fmpx +58 -58
  79. floodmodeller_api/test/test_data/defaultUnits.gxy +85 -85
  80. floodmodeller_api/test/test_data/ex3.ief +20 -20
  81. floodmodeller_api/test/test_data/ex3.lf1 +2800 -2800
  82. floodmodeller_api/test/test_data/ex4.DAT +1374 -1374
  83. floodmodeller_api/test/test_data/ex4_changed.DAT +1374 -1374
  84. floodmodeller_api/test/test_data/example1.inp +329 -329
  85. floodmodeller_api/test/test_data/example2.inp +158 -158
  86. floodmodeller_api/test/test_data/example3.inp +297 -297
  87. floodmodeller_api/test/test_data/example4.inp +388 -388
  88. floodmodeller_api/test/test_data/example5.inp +147 -147
  89. floodmodeller_api/test/test_data/example6.inp +154 -154
  90. floodmodeller_api/test/test_data/jump.dat +176 -176
  91. floodmodeller_api/test/test_data/network.dat +1374 -1374
  92. floodmodeller_api/test/test_data/network.ext +45 -45
  93. floodmodeller_api/test/test_data/network.exy +1 -1
  94. floodmodeller_api/test/test_data/network.feb +45 -45
  95. floodmodeller_api/test/test_data/network.ied +45 -45
  96. floodmodeller_api/test/test_data/network.ief +20 -20
  97. floodmodeller_api/test/test_data/network.inp +147 -147
  98. floodmodeller_api/test/test_data/network.pxy +57 -57
  99. floodmodeller_api/test/test_data/network.zzd +122 -122
  100. floodmodeller_api/test/test_data/network_dat_expected.json +21837 -0
  101. floodmodeller_api/test/test_data/network_from_tabularCSV.csv +87 -87
  102. floodmodeller_api/test/test_data/network_ied_expected.json +287 -0
  103. floodmodeller_api/test/test_data/rnweir.dat +9 -9
  104. floodmodeller_api/test/test_data/rnweir.ext +45 -45
  105. floodmodeller_api/test/test_data/rnweir.feb +9 -9
  106. floodmodeller_api/test/test_data/rnweir.gxy +45 -45
  107. floodmodeller_api/test/test_data/rnweir_default.dat +74 -74
  108. floodmodeller_api/test/test_data/rnweir_default.ext +45 -45
  109. floodmodeller_api/test/test_data/rnweir_default.feb +9 -9
  110. floodmodeller_api/test/test_data/rnweir_default.fmpx +58 -58
  111. floodmodeller_api/test/test_data/rnweir_default.gxy +53 -53
  112. floodmodeller_api/test/test_data/unit checks.dat +16 -16
  113. floodmodeller_api/test/test_ied.py +29 -29
  114. floodmodeller_api/test/test_ief.py +125 -24
  115. floodmodeller_api/test/test_inp.py +47 -48
  116. floodmodeller_api/test/test_json.py +114 -0
  117. floodmodeller_api/test/test_logs_lf.py +48 -51
  118. floodmodeller_api/test/test_tool.py +165 -154
  119. floodmodeller_api/test/test_toolbox_structure_log.py +234 -239
  120. floodmodeller_api/test/test_xml2d.py +151 -156
  121. floodmodeller_api/test/test_zzn.py +36 -34
  122. floodmodeller_api/to_from_json.py +218 -0
  123. floodmodeller_api/tool.py +332 -330
  124. floodmodeller_api/toolbox/__init__.py +5 -5
  125. floodmodeller_api/toolbox/example_tool.py +45 -45
  126. floodmodeller_api/toolbox/model_build/__init__.py +2 -2
  127. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +100 -94
  128. floodmodeller_api/toolbox/model_build/structure_log/__init__.py +1 -1
  129. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +287 -289
  130. floodmodeller_api/toolbox/model_build/structure_log_definition.py +76 -72
  131. floodmodeller_api/units/__init__.py +10 -10
  132. floodmodeller_api/units/_base.py +214 -209
  133. floodmodeller_api/units/boundaries.py +467 -469
  134. floodmodeller_api/units/comment.py +52 -55
  135. floodmodeller_api/units/conduits.py +382 -403
  136. floodmodeller_api/units/helpers.py +123 -132
  137. floodmodeller_api/units/iic.py +107 -101
  138. floodmodeller_api/units/losses.py +305 -308
  139. floodmodeller_api/units/sections.py +444 -445
  140. floodmodeller_api/units/structures.py +1690 -1684
  141. floodmodeller_api/units/units.py +93 -102
  142. floodmodeller_api/units/unsupported.py +44 -44
  143. floodmodeller_api/units/variables.py +87 -89
  144. floodmodeller_api/urban1d/__init__.py +11 -11
  145. floodmodeller_api/urban1d/_base.py +188 -177
  146. floodmodeller_api/urban1d/conduits.py +93 -85
  147. floodmodeller_api/urban1d/general_parameters.py +58 -58
  148. floodmodeller_api/urban1d/junctions.py +81 -79
  149. floodmodeller_api/urban1d/losses.py +81 -74
  150. floodmodeller_api/urban1d/outfalls.py +114 -107
  151. floodmodeller_api/urban1d/raingauges.py +111 -108
  152. floodmodeller_api/urban1d/subsections.py +92 -93
  153. floodmodeller_api/urban1d/xsections.py +147 -141
  154. floodmodeller_api/util.py +77 -21
  155. floodmodeller_api/validation/parameters.py +660 -660
  156. floodmodeller_api/validation/urban_parameters.py +388 -404
  157. floodmodeller_api/validation/validation.py +110 -112
  158. floodmodeller_api/version.py +1 -1
  159. floodmodeller_api/xml2d.py +688 -684
  160. floodmodeller_api/xml2d_template.py +37 -37
  161. floodmodeller_api/zzn.py +387 -365
  162. {floodmodeller_api-0.4.2.dist-info → floodmodeller_api-0.4.3.dist-info}/LICENSE.txt +13 -13
  163. {floodmodeller_api-0.4.2.dist-info → floodmodeller_api-0.4.3.dist-info}/METADATA +82 -82
  164. floodmodeller_api-0.4.3.dist-info/RECORD +179 -0
  165. {floodmodeller_api-0.4.2.dist-info → floodmodeller_api-0.4.3.dist-info}/WHEEL +1 -1
  166. floodmodeller_api-0.4.3.dist-info/entry_points.txt +3 -0
  167. floodmodeller_api/libifcoremd.dll +0 -0
  168. floodmodeller_api/test/test_data/EX3.bmp +0 -0
  169. floodmodeller_api/test/test_data/test_output.csv +0 -87
  170. floodmodeller_api/zzn_read.dll +0 -0
  171. floodmodeller_api-0.4.2.data/scripts/fmapi-add_siltation.bat +0 -2
  172. floodmodeller_api-0.4.2.data/scripts/fmapi-add_siltation.py +0 -3
  173. floodmodeller_api-0.4.2.data/scripts/fmapi-structure_log.bat +0 -2
  174. floodmodeller_api-0.4.2.data/scripts/fmapi-structure_log.py +0 -3
  175. floodmodeller_api-0.4.2.data/scripts/fmapi-toolbox.bat +0 -2
  176. floodmodeller_api-0.4.2.data/scripts/fmapi-toolbox.py +0 -41
  177. floodmodeller_api-0.4.2.dist-info/RECORD +0 -169
  178. {floodmodeller_api-0.4.2.dist-info → floodmodeller_api-0.4.3.dist-info}/top_level.txt +0 -0
@@ -1,346 +1,354 @@
1
- """
2
- Flood Modeller Python API
3
- Copyright (C) 2023 Jacobs U.K. Limited
4
-
5
- This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
- as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
-
8
- This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
-
11
- You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
-
13
- If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
- address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
- """
16
-
17
- import datetime as dt
18
- from abc import ABC, abstractmethod
19
-
20
- import pandas as pd
21
-
22
-
23
- class Data(ABC):
24
- def __init__(self, header: str, subheaders: list):
25
- self.header = header
26
- self.no_values = 0
27
- self._subheaders = subheaders
28
-
29
- @abstractmethod
30
- def update(self):
31
- pass
32
-
33
- @abstractmethod
34
- def get_value(self):
35
- pass
36
-
37
-
38
- class LastData(Data):
39
- def __init__(self, header: str, subheaders: list):
40
- super().__init__(header, subheaders)
41
- self._value = None # single value
42
-
43
- def update(self, data):
44
- self._value = data
45
- self.no_values = 1
46
-
47
- def get_value(self, index_key=None, index_df=None):
48
- return self._value
49
-
50
-
51
- class AllData(Data):
52
- def __init__(self, header: str, subheaders: list):
53
- super().__init__(header, subheaders)
54
- self._value = [] # list
55
-
56
- def update(self, data):
57
- self._value.append(data)
58
- self.no_values += 1
59
-
60
- def get_value(self, index_key: str = None, index_df: pd.DataFrame = None) -> pd.DataFrame:
61
- df = pd.DataFrame(self._value)
62
-
63
- # do nothing to empty dataframes
64
- if df.empty:
65
- return df
66
-
67
- # overall header
68
- if self._subheaders is None:
69
- df.rename(columns={df.columns[0]: self.header}, inplace=True)
70
-
71
- else:
72
- # subheaders
73
- df = df.set_axis(self._subheaders, axis=1)
74
-
75
- # remove duplicate of index
76
- # sometimes it includes extra values
77
- # it also has different precision
78
- index_duplicate = index_key + "_duplicate"
79
- if index_duplicate in df.columns:
80
- index_df = df[index_duplicate].round("1s")
81
-
82
- df.drop(index_duplicate, axis=1, inplace=True)
83
- # self._subheaders.remove(index_duplicate)
84
-
85
- # there is no index because *this* is the index
86
- if index_key is None:
87
- return df
88
-
89
- # made lf index the dataframe index
90
- df[index_key] = index_df
91
- df.dropna(inplace=True)
92
- df.drop_duplicates(subset=index_key, keep="last", inplace=True)
93
- df.set_index(index_key, inplace=True)
94
-
95
- return df
96
-
97
-
98
- def data_factory(data_type: str, header: str, subheaders: list = None):
99
- if data_type == "last":
100
- return LastData(header, subheaders)
101
- elif data_type == "all":
102
- return AllData(header, subheaders)
103
- else:
104
- raise ValueError(f'Unexpected data "{data_type}"')
105
-
106
-
107
- class State(ABC):
108
- def __init__(self, extracted_data):
109
- pass
110
-
111
- @abstractmethod
112
- def report_progress(self):
113
- pass
114
-
115
-
116
- class UnsteadyState(State):
117
- def __init__(self, extracted_data):
118
- self._progress_data = extracted_data["progress"].data
119
-
120
- def report_progress(self) -> float:
121
- """Returns last progress percentage"""
122
-
123
- progress = self._progress_data.get_value()
124
-
125
- if progress is None:
126
- return 0
127
-
128
- return progress
129
-
130
-
131
- class SteadyState(State):
132
- def report_progress(self):
133
- raise NotImplementedError("No progress reporting for steady simulations")
134
-
135
-
136
- def state_factory(steady: bool, extracted_data: Data) -> State:
137
- if steady is True:
138
- return SteadyState(extracted_data)
139
- else:
140
- return UnsteadyState(extracted_data)
141
-
142
-
143
- class Parser(ABC):
144
- """Abstract base class for processing and storing different types of line
145
-
146
- Args:
147
- name
148
- prefix
149
- data_type
150
- exclude
151
- is_index
152
- before_index
153
- """
154
-
155
- def __init__(
156
- self,
157
- name: str,
158
- prefix: str,
159
- data_type: str,
160
- exclude: str = None,
161
- is_index: bool = False,
162
- before_index: bool = False,
163
- ):
164
- self._name = name
165
-
166
- self.prefix = prefix
167
- self.is_index = is_index
168
- self.before_index = before_index
169
-
170
- self._exclude = exclude
171
-
172
- self.no_values = 0
173
-
174
- self.data_type = data_type
175
- self.data = data_factory(data_type, name)
176
-
177
- def process_line(self, raw_line: str):
178
- """self._process_line with exception handling of expected nan values"""
179
-
180
- try:
181
- processed_line = self._process_line(raw_line)
182
-
183
- except ValueError as e:
184
- if raw_line in self._exclude:
185
- processed_line = self._nan
186
- else:
187
- raise e
188
-
189
- self.data.update(processed_line)
190
-
191
- @abstractmethod
192
- def _process_line(self, raw: str) -> str:
193
- """Converts string to meaningful data"""
194
- pass
195
-
196
-
197
- class DateTimeParser(Parser):
198
- """Extra argument from superclass code: str"""
199
-
200
- def __init__(self, *args, **kwargs):
201
- self._code = kwargs.pop("code")
202
- super().__init__(*args, **kwargs)
203
- self._nan = pd.NaT
204
-
205
- def _process_line(self, raw: str) -> str:
206
- """Converts string to datetime"""
207
-
208
- processed = dt.datetime.strptime(raw, self._code)
209
-
210
- return processed
211
-
212
-
213
- class TimeParser(DateTimeParser):
214
- def __init__(self, *args, **kwargs):
215
- super().__init__(*args, **kwargs)
216
- self._nan = pd.NaT
217
-
218
- def _process_line(self, raw: str) -> str:
219
- """Converts string to time"""
220
-
221
- raw, _, _ = raw.partition(" ") # Temp fix to ignore '(+n d)' in EFT
222
- processed = super()._process_line(raw)
223
-
224
- return processed.time()
225
-
226
-
227
- class TimeDeltaHMSParser(Parser):
228
- def __init__(self, *args, **kwargs):
229
- super().__init__(*args, **kwargs)
230
- self._nan = pd.NaT
231
-
232
- def _process_line(self, raw: str) -> str:
233
- """Converts string HH:MM:SS to timedelta"""
234
-
235
- h, m, s = raw.split(":")
236
- processed = dt.timedelta(hours=int(h), minutes=int(m), seconds=int(s))
237
-
238
- return processed
239
-
240
-
241
- class TimeDeltaHParser(Parser):
242
- def __init__(self, *args, **kwargs):
243
- super().__init__(*args, **kwargs)
244
- self._nan = pd.NaT
245
-
246
- def _process_line(self, raw: str) -> str:
247
- """Converts string H (with decimal place and "hrs") to timedelta"""
248
-
249
- h = raw.split("hrs")[0]
250
- processed = dt.timedelta(hours=float(h))
251
-
252
- return processed
253
-
254
-
255
- class TimeDeltaSParser(Parser):
256
- def __init__(self, *args, **kwargs):
257
- super().__init__(*args, **kwargs)
258
- self._nan = pd.NaT
259
-
260
- def _process_line(self, raw: str) -> str:
261
- """Converts string S (with decimal place and "s") to timedelta"""
262
-
263
- s = raw.split("s")[0] # TODO: not necessary for simulation time
264
- processed = dt.timedelta(seconds=float(s))
265
-
266
- return processed
267
-
268
-
269
- class FloatParser(Parser):
270
- def __init__(self, *args, **kwargs):
271
- super().__init__(*args, **kwargs)
272
- self._nan = float("nan")
273
-
274
- def _process_line(self, raw: str) -> str:
275
- """Converts string to float"""
276
-
277
- processed = float(raw)
278
-
279
- return processed
280
-
281
-
282
- class FloatSplitParser(Parser):
283
- """Extra argument from superclass split: list"""
284
-
285
- def __init__(self, *args, **kwargs):
286
- self._split = kwargs.pop("split")
287
- super().__init__(*args, **kwargs)
288
- self._nan = float("nan")
289
-
290
- def _process_line(self, raw: str):
291
- """Converts string to float, removing everything after split"""
292
-
293
- processed = float(raw.split(self._split)[0])
294
-
295
- return processed
296
-
297
-
298
- class StringParser(Parser):
299
- def __init__(self, *args, **kwargs):
300
- super().__init__(*args, **kwargs)
301
- self._nan = ""
302
-
303
- def _process_line(self, raw: str):
304
- """No conversion necessary"""
305
-
306
- processed = raw
307
-
308
- return processed
309
-
310
-
311
- class StringSplitParser(Parser):
312
- """Extra argument from superclass split: str"""
313
-
314
- def __init__(self, *args, **kwargs):
315
- self._split = kwargs.pop("split")
316
- super().__init__(*args, **kwargs)
317
- self._nan = ""
318
-
319
- def _process_line(self, raw: str):
320
- """Converts string to float, removing everything after split"""
321
-
322
- processed = raw.split(self._split)[0]
323
-
324
- return processed
325
-
326
-
327
- class TimeFloatMultParser(Parser):
328
- """Extra argument from superclass names: list"""
329
-
330
- def __init__(self, *args, **kwargs):
331
- self._subheaders = kwargs.pop("subheaders")
332
- super().__init__(*args, **kwargs)
333
-
334
- self._nan = []
335
- for header in self._subheaders:
336
- self._nan.append(float("nan"))
337
-
338
- self.data = data_factory(self.data_type, self._name, self._subheaders) # overwrite
339
-
340
- def _process_line(self, raw: str):
341
- """Converts string to list of floats"""
342
-
343
- processed = [float(x) for x in raw.split()]
344
- processed[0] = dt.timedelta(hours=float(processed[0]))
345
-
346
- return processed
1
+ """
2
+ Flood Modeller Python API
3
+ Copyright (C) 2024 Jacobs U.K. Limited
4
+
5
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
+
8
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
+
11
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
+
13
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import datetime as dt
20
+ from abc import ABC, abstractmethod
21
+
22
+ import pandas as pd
23
+
24
+
25
class Data(ABC):
    """Abstract container for values parsed out of a log file.

    Concrete subclasses decide whether every parsed value is retained
    (``AllData``) or only the most recent one (``LastData``).
    """

    def __init__(self, header: str, subheaders: list | None):
        # Column label(s) used when the stored values are exposed.
        self.header = header
        self._subheaders = subheaders
        # Count of values stored so far; maintained by update().
        self.no_values = 0

    @abstractmethod
    def update(self):
        """Store one newly parsed value."""

    @abstractmethod
    def get_value(self):
        """Return the stored value(s)."""
38
+
39
+
40
class LastData(Data):
    """Container that keeps only the most recently parsed value."""

    def __init__(self, header: str, subheaders: list | None):
        super().__init__(header, subheaders)
        # Latest parsed value; None until the first update().
        self._value = None

    def update(self, data):
        """Replace the stored value with *data*."""
        self._value = data
        self.no_values = 1

    def get_value(self, index_key=None, index_df=None):
        """Return the latest value; the index arguments are accepted but unused."""
        return self._value
51
+
52
+
53
class AllData(Data):
    """Container that keeps every parsed value and exposes them as a DataFrame."""

    def __init__(self, header: str, subheaders: list | None):
        super().__init__(header, subheaders)
        self._value: list[object] = []

    def update(self, data):
        """Append one parsed value."""
        self._value.append(data)
        self.no_values += 1

    def get_value(
        self,
        index_key: str | None = None,
        index_df: pd.Series | None = None,
    ) -> pd.DataFrame:
        """Return all stored values as a DataFrame.

        Args:
            index_key: name of the column to use as the index, or None when
                this data is itself the index.
            index_df: the index values, aligned with the stored rows.
        """
        df = pd.DataFrame(self._value)

        # do nothing to empty dataframes
        if df.empty:
            return df

        if self._subheaders is None:
            # single column: use the overall header
            df.rename(columns={df.columns[0]: self.header}, inplace=True)
        else:
            # FIX: apply the subheaders whenever they are configured, not only
            # when an index is supplied — previously the `elif index_key is not
            # None` guard left callers with bare integer column labels when no
            # index was requested.
            df = df.set_axis(self._subheaders, axis=1)

            if index_key is not None:
                # remove duplicate of index
                # sometimes it includes extra values
                # it also has different precision
                index_duplicate = index_key + "_duplicate"
                if index_duplicate in df.columns:
                    try:
                        index_df = df[index_duplicate].dt.round("1s")
                        df.drop(index_duplicate, axis=1, inplace=True)
                    except AttributeError:
                        # not datetime-like, so it cannot replace the index
                        df = df.drop(columns=index_duplicate)

        # there is no index because *this* is the index
        if index_key is None:
            return df

        # make lf index the dataframe index
        df[index_key] = index_df
        df.dropna(inplace=True)
        df.drop_duplicates(subset=index_key, keep="last", inplace=True)
        df.set_index(index_key, inplace=True)

        return df
103
+
104
+
105
def data_factory(data_type: str, header: str, subheaders: list | None = None):
    """Build the Data container matching *data_type*.

    Args:
        data_type: "last" keeps only the latest value, "all" keeps every value.
        header: overall column label for the stored data.
        subheaders: per-column labels when each value is a row of fields.

    Raises:
        ValueError: if *data_type* is neither "last" nor "all".
    """
    if data_type not in ("last", "all"):
        raise ValueError(f'Unexpected data "{data_type}"')
    data_cls = LastData if data_type == "last" else AllData
    return data_cls(header, subheaders)
111
+
112
+
113
class State(ABC):
    """Interface for reporting on a simulation run's state."""

    @abstractmethod
    def __init__(self, extracted_data):
        """Capture whatever parsed data the concrete state needs."""

    @abstractmethod
    def report_progress(self):
        """Report how far through the simulation is."""
121
+
122
+
123
class UnsteadyState(State):
    """State of an unsteady simulation; progress is a percentage."""

    def __init__(self, extracted_data):
        # The "progress" parser's container holds the latest percentage.
        self._progress_data = extracted_data["progress"].data

    def report_progress(self) -> float:
        """Returns last progress percentage"""

        latest = self._progress_data.get_value()
        return 0 if latest is None else latest
136
+
137
+
138
class SteadyState(State):
    """State of a steady simulation; progress reporting is unsupported."""

    def __init__(self, extracted_data):
        # Nothing needs tracking for a steady run.
        pass

    def report_progress(self):
        """Always raises: steady runs have no progress log to read."""
        raise NotImplementedError("No progress reporting for steady simulations")
144
+
145
+
146
def state_factory(steady: bool, extracted_data: Data) -> State:
    """Create the State object appropriate for the simulation type."""
    # Note: only the literal True selects SteadyState, mirroring `is True`.
    state_cls = SteadyState if steady is True else UnsteadyState
    return state_cls(extracted_data)
150
+
151
+
152
class Parser(ABC):
    """Abstract base class for processing and storing different types of line

    Args:
        name
        prefix
        data_type
        exclude
        is_index
        before_index
    """

    # Set by each subclass: value stored when a raw line is in `exclude`.
    _nan: object

    def __init__(  # noqa: PLR0913
        self,
        name: str,
        prefix: str,
        data_type: str,
        exclude: str | None = None,
        is_index: bool | None = False,
        before_index: bool | None = False,
    ):
        self._name = name
        self._exclude = exclude

        self.prefix = prefix
        self.is_index = is_index
        self.before_index = before_index

        self.no_values = 0

        self.data_type = data_type
        self.data = data_factory(data_type, name)

    def process_line(self, raw_line: str) -> None:
        """self._process_line with exception handling of expected nan values"""

        try:
            value = self._process_line(raw_line)
        except ValueError:
            # Only lines listed in `exclude` are expected failures.
            if not (self._exclude and raw_line in self._exclude):
                raise
            value = self._nan

        self.data.update(value)

    @abstractmethod
    def _process_line(self, raw: str) -> object:
        """Converts string to meaningful data"""
205
+
206
+
207
class DateTimeParser(Parser):
    """Extra argument from superclass code: str (a strptime format)"""

    def __init__(self, *args, **kwargs):
        self._code = kwargs.pop("code")
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.datetime:
        """Converts string to datetime"""

        parsed = dt.datetime.strptime(raw, self._code)
        return parsed
218
+
219
+
220
class TimeParser(Parser):
    """Extra argument from superclass code: str (a strptime format)"""

    def __init__(self, *args, **kwargs):
        self._code = kwargs.pop("code")
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.time:
        """Converts string to time"""

        # Temp fix to ignore '(+n d)' in EFT: keep text before first space.
        head = raw.partition(" ")[0]
        return dt.datetime.strptime(head, self._code).time()
233
+
234
+
235
class TimeDeltaHMSParser(Parser):
    """Parses lines holding an HH:MM:SS duration."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.timedelta:
        """Converts string HH:MM:SS to timedelta"""

        hours, minutes, seconds = (int(part) for part in raw.split(":"))
        return dt.timedelta(hours=hours, minutes=minutes, seconds=seconds)
245
+
246
+
247
class TimeDeltaHParser(Parser):
    """Parses lines holding a decimal hour count suffixed with "hrs"."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.timedelta:
        """Converts string H (with decimal place and "hrs") to timedelta"""

        hours_text = raw.partition("hrs")[0]
        return dt.timedelta(hours=float(hours_text))
257
+
258
+
259
class TimeDeltaSParser(Parser):
    """Parses lines holding a decimal second count suffixed with "s"."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.timedelta:
        """Converts string S (with decimal place and "s") to timedelta"""

        # TODO: the "s" strip is not necessary for simulation time
        seconds_text = raw.partition("s")[0]
        return dt.timedelta(seconds=float(seconds_text))
269
+
270
+
271
class FloatParser(Parser):
    """Parses lines holding a single float value."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._nan = float("nan")

    def _process_line(self, raw: str) -> float:
        """Converts string to float"""

        value = float(raw)
        return value
280
+
281
+
282
class FloatSplitParser(Parser):
    """Extra argument from superclass split: str (separator to cut at)"""

    def __init__(self, *args, **kwargs):
        self._split = kwargs.pop("split")
        super().__init__(*args, **kwargs)
        self._nan = float("nan")

    def _process_line(self, raw: str) -> float:
        """Converts string to float, removing everything after split"""

        head = raw.partition(self._split)[0]
        return float(head)
294
+
295
+
296
class StringParser(Parser):
    """Parses lines that are stored verbatim."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._nan = ""

    def _process_line(self, raw: str) -> str:
        """No conversion necessary"""

        return raw
305
+
306
+
307
class StringSplitParser(Parser):
    """Extra argument from superclass split: str"""

    def __init__(self, *args, **kwargs):
        self._split = kwargs.pop("split")
        super().__init__(*args, **kwargs)
        self._nan = ""

    def _process_line(self, raw: str) -> str:
        """Removes everything after split"""

        return raw.partition(self._split)[0]
319
+
320
+
321
class TimeFloatMultParser(Parser):
    """Extra argument from superclass subheaders: list (one label per field)"""

    def __init__(self, *args, **kwargs):
        self._subheaders = kwargs.pop("subheaders")
        super().__init__(*args, **kwargs)

        # One nan per field so an excluded line fills a whole row.
        self._nan = [float("nan")] * len(self._subheaders)

        # Rebuild the container so it carries the per-field labels.
        self.data = data_factory(self.data_type, self._name, self._subheaders)  # overwrite

    def _process_line(self, raw: str) -> list[dt.timedelta | float]:
        """Converts string to list of one timedelta and then floats"""

        numbers = [float(token) for token in raw.split()]
        return [dt.timedelta(hours=numbers[0]), *numbers[1:]]
340
+
341
+
342
class TimeSplitParser(Parser):
    """Extra argument from superclass code: str, split: str"""

    def __init__(self, *args, **kwargs):
        self._code = kwargs.pop("code")
        self._split = kwargs.pop("split")
        super().__init__(*args, **kwargs)
        self._nan = pd.NaT

    def _process_line(self, raw: str) -> dt.time:
        """Converts string to time, removing everything after split"""

        head = raw.partition(self._split)[0].strip()
        return dt.datetime.strptime(head, self._code).time()