floodmodeller-api 0.4.4.post1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry, and is provided for informational purposes only.
Files changed (67)
  1. floodmodeller_api/__init__.py +1 -0
  2. floodmodeller_api/dat.py +117 -96
  3. floodmodeller_api/hydrology_plus/__init__.py +2 -0
  4. floodmodeller_api/hydrology_plus/helper.py +23 -0
  5. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +333 -0
  6. floodmodeller_api/ied.py +93 -90
  7. floodmodeller_api/ief.py +233 -50
  8. floodmodeller_api/ief_flags.py +1 -0
  9. floodmodeller_api/logs/lf.py +5 -1
  10. floodmodeller_api/mapping.py +2 -0
  11. floodmodeller_api/test/test_conveyance.py +23 -32
  12. floodmodeller_api/test/test_data/7082.ief +28 -0
  13. floodmodeller_api/test/test_data/BaseModel_2D_Q100.ief +28 -0
  14. floodmodeller_api/test/test_data/Baseline_unchecked.csv +77 -0
  15. floodmodeller_api/test/test_data/Constant QT.ief +19 -0
  16. floodmodeller_api/test/test_data/Domain1_Q_xml_expected.json +7 -7
  17. floodmodeller_api/test/test_data/EX18_DAT_expected.json +54 -38
  18. floodmodeller_api/test/test_data/EX3_DAT_expected.json +246 -166
  19. floodmodeller_api/test/test_data/EX3_IEF_expected.json +25 -20
  20. floodmodeller_api/test/test_data/EX6_DAT_expected.json +522 -350
  21. floodmodeller_api/test/test_data/FEH boundary.ief +23 -0
  22. floodmodeller_api/test/test_data/Linked1D2D_xml_expected.json +7 -7
  23. floodmodeller_api/test/test_data/P3Panels_UNsteady.ief +25 -0
  24. floodmodeller_api/test/test_data/QT in dat file.ief +20 -0
  25. floodmodeller_api/test/test_data/T10.ief +25 -0
  26. floodmodeller_api/test/test_data/T2.ief +25 -0
  27. floodmodeller_api/test/test_data/T5.ief +25 -0
  28. floodmodeller_api/test/test_data/df_flows_hplus.csv +56 -0
  29. floodmodeller_api/test/test_data/event_hplus.csv +56 -0
  30. floodmodeller_api/test/test_data/ex4.ief +20 -0
  31. floodmodeller_api/test/test_data/ex6.ief +21 -0
  32. floodmodeller_api/test/test_data/example_h+_export.csv +77 -0
  33. floodmodeller_api/test/test_data/hplus_export_example_1.csv +72 -0
  34. floodmodeller_api/test/test_data/hplus_export_example_10.csv +77 -0
  35. floodmodeller_api/test/test_data/hplus_export_example_2.csv +79 -0
  36. floodmodeller_api/test/test_data/hplus_export_example_3.csv +77 -0
  37. floodmodeller_api/test/test_data/hplus_export_example_4.csv +131 -0
  38. floodmodeller_api/test/test_data/hplus_export_example_5.csv +77 -0
  39. floodmodeller_api/test/test_data/hplus_export_example_6.csv +131 -0
  40. floodmodeller_api/test/test_data/hplus_export_example_7.csv +131 -0
  41. floodmodeller_api/test/test_data/hplus_export_example_8.csv +131 -0
  42. floodmodeller_api/test/test_data/hplus_export_example_9.csv +131 -0
  43. floodmodeller_api/test/test_data/network_dat_expected.json +312 -210
  44. floodmodeller_api/test/test_data/network_ied_expected.json +6 -6
  45. floodmodeller_api/test/test_data/network_with_comments.ied +55 -0
  46. floodmodeller_api/test/test_flowtimeprofile.py +133 -0
  47. floodmodeller_api/test/test_hydrology_plus_export.py +210 -0
  48. floodmodeller_api/test/test_ied.py +12 -0
  49. floodmodeller_api/test/test_ief.py +49 -9
  50. floodmodeller_api/test/test_json.py +6 -1
  51. floodmodeller_api/test/test_read_file.py +27 -0
  52. floodmodeller_api/test/test_river.py +169 -0
  53. floodmodeller_api/to_from_json.py +7 -1
  54. floodmodeller_api/tool.py +6 -10
  55. floodmodeller_api/units/__init__.py +11 -1
  56. floodmodeller_api/units/conveyance.py +101 -212
  57. floodmodeller_api/units/sections.py +120 -39
  58. floodmodeller_api/util.py +2 -0
  59. floodmodeller_api/version.py +1 -1
  60. floodmodeller_api/xml2d.py +20 -13
  61. floodmodeller_api/xsd_backup.xml +738 -0
  62. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/METADATA +2 -1
  63. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/RECORD +67 -33
  64. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/WHEEL +1 -1
  65. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/LICENSE.txt +0 -0
  66. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/entry_points.txt +0 -0
  67. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/top_level.txt +0 -0
floodmodeller_api/units/sections.py CHANGED
@@ -19,7 +19,7 @@ import pandas as pd
 from floodmodeller_api.validation import _validate_unit
 
 from ._base import Unit
-from .conveyance import calculate_cross_section_conveyance_chached
+from .conveyance import calculate_cross_section_conveyance_cached
 from .helpers import (
     _to_float,
     _to_int,
@@ -51,12 +51,21 @@ class RIVER(Unit):
 
     Returns:
         RIVER: Flood Modeller RIVER Unit class object
-
-    Methods:
-        convert_to_muskingham: Not currently supported but planned for future release
     """
 
     _unit = "RIVER"
+    _required_columns = [
+        "X",
+        "Y",
+        "Mannings n",
+        "Panel",
+        "RPL",
+        "Marker",
+        "Easting",
+        "Northing",
+        "Deactivation",
+        "SP. Marker",
+    ]
 
     def _create_from_blank(  # noqa: PLR0913
         self,
@@ -88,29 +97,18 @@ class RIVER(Unit):
             "dist_to_next": dist_to_next,
             "slope": slope,
             "density": density,
-            "data": data,
         }.items():
             setattr(self, param, val)
 
-        self.data = (
+        self._data = (
             data
             if isinstance(data, pd.DataFrame)
             else pd.DataFrame(
                 [],
-                columns=[
-                    "X",
-                    "Y",
-                    "Mannings n",
-                    "Panel",
-                    "RPL",
-                    "Marker",
-                    "Easting",
-                    "Northing",
-                    "Deactivation",
-                    "SP. Marker",
-                ],
+                columns=self._required_columns,
             )
         )
+        self._active_data = None
 
     def _read(self, riv_block):
         """Function to read a given RIVER block and store data as class attributes."""
@@ -171,20 +169,9 @@ class RIVER(Unit):
                     sp_marker,
                 ],
             )
-            self.data = pd.DataFrame(
+            self._data = pd.DataFrame(
                 data_list,
-                columns=[
-                    "X",
-                    "Y",
-                    "Mannings n",
-                    "Panel",
-                    "RPL",
-                    "Marker",
-                    "Easting",
-                    "Northing",
-                    "Deactivation",
-                    "SP. Marker",
-                ],
+                columns=self._required_columns,
             )
 
         else:
@@ -195,6 +182,8 @@ class RIVER(Unit):
             self._raw_block = riv_block
             self.name = riv_block[2][: self._label_len].strip()
 
+        self._active_data = None
+
     def _write(self):
         """Function to write a valid RIVER block"""
 
@@ -214,7 +203,7 @@ class RIVER(Unit):
         )
         # Manual so slope can have more sf
         params = f'{self.dist_to_next:>10.3f}{"":>10}{self.slope:>10.6f}{self.density:>10.3f}'
-        self.nrows = len(self.data)
+        self.nrows = len(self._data)
         riv_block = [header, self.subtype, labels, params, f"{str(self.nrows):>10}"]
 
         riv_data = []
@@ -230,7 +219,7 @@ class RIVER(Unit):
             northing,
             deactivation,
             sp_marker,
-        ) in self.data.itertuples():
+        ) in self._data.itertuples():
             row = join_10_char(x, y, n)
             if panel:
                 row += "*"
@@ -245,6 +234,36 @@ class RIVER(Unit):
 
         return self._raw_block
 
+    @property
+    def data(self) -> pd.DataFrame:
+        """Data table for the river cross section.
+
+        Returns:
+            pd.DataFrame: Pandas dataframe for the cross section data with columns: 'X', 'Y',
+                'Mannings n', 'Panel', 'RPL', 'Marker', 'Easting', 'Northing', 'Deactivation',
+                'SP. Marker'
+        """
+        if self._active_data is None:
+            return self._data
+
+        # Replace the active section with the self._active_data df
+        left_bank_idx, right_bank_idx = self._get_left_right_active_index()
+        self._data = pd.concat(
+            [self._data[:left_bank_idx], self._active_data, self._data[right_bank_idx + 1 :]],
+        ).reset_index(drop=True)
+        self._active_data = None
+        return self._data
+
+    @data.setter
+    def data(self, new_df: pd.DataFrame) -> None:
+        if not isinstance(new_df, pd.DataFrame):
+            raise ValueError(
+                "The updated data table for a cross section must be a pandas DataFrame.",
+            )
+        if new_df.columns != self._required_columns:
+            raise ValueError(f"The DataFrame must only contain columns: {self._required_columns}")
+        self._data = new_df
+
     @property
     def conveyance(self) -> pd.Series:
         """Calculate and return the conveyance curve of the cross-section.
@@ -257,14 +276,76 @@ class RIVER(Unit):
         Returns:
             pd.Series: A pandas Series containing the conveyance values indexed by water levels.
         """
-        return calculate_cross_section_conveyance_chached(
-            x=tuple(self.data.X.values),
-            y=tuple(self.data.Y.values),
-            n=tuple(self.data["Mannings n"].values),
-            rpl=tuple(self.data.RPL.values),
-            panel_markers=tuple(self.data.Panel.values),
+        return calculate_cross_section_conveyance_cached(
+            x=tuple(self._data.X.values),
+            y=tuple(self._data.Y.values),
+            n=tuple(self._data["Mannings n"].values),
+            rpl=tuple(self._data.RPL.values),
+            panel_markers=tuple(self._data.Panel.values),
         )
 
+    @property
+    def active_data(self) -> pd.DataFrame:
+        """Data table for active subset of the river cross section, defined by deactivation markers.
+
+        Returns:
+            pd.DataFrame: Pandas dataframe for the active cross section data with columns: 'X', 'Y',
+                'Mannings n', 'Panel', 'RPL', 'Marker', 'Easting', 'Northing', 'Deactivation',
+                'SP. Marker'
+
+        Example:
+            In this example we read in a river section that has deactivation markers
+
+            .. ipython:: python
+
+                from floodmodeller_api.units import RIVER
+                river_unit = RIVER(
+                    [
+                        "RIVER normal case",
+                        "SECTION",
+                        "SomeUnit",
+                        " 0.000 0.000100 1000.000",
+                        " 5",
+                        " 0.000 10 0.030 0.000 0.0 0.0 ",
+                        " 1.000 9 0.030 0.000 0.0 0.0 LEFT",
+                        " 2.000 5 0.030 0.000 0.0 0.0 ",
+                        " 3.000 6 0.030 0.000 0.0 0.0 RIGHT",
+                        " 4.000 10 0.030 0.000 0.0 0.0 ",
+                    ]
+                )
+                river_unit.data
+                river_unit.active_data
+        """
+        if self._active_data is not None:
+            return self._active_data
+        left_bank_idx, right_bank_idx = self._get_left_right_active_index()
+        self._active_data = self._data.iloc[left_bank_idx : right_bank_idx + 1].copy()
+        return self._active_data
+
+    @active_data.setter
+    def active_data(self, new_df: pd.DataFrame) -> None:
+        if not isinstance(new_df, pd.DataFrame):
+            raise ValueError(
+                "The updated data table for a cross section must be a pandas DataFrame.",
+            )
+        if new_df.columns.to_list() != self._required_columns:
+            raise ValueError(f"The DataFrame must only contain columns: {self._required_columns}")
+
+        # Ensure activation markers are present
+        new_df = new_df.copy()
+        new_df.iloc[0, 8] = "LEFT"
+        new_df.iloc[-1, 8] = "RIGHT"
+        self._active_data = new_df
+
+    def _get_left_right_active_index(self) -> tuple[int, int]:
+        bank_data = self._data.Deactivation.to_list()
+        lb_flag = "LEFT" in bank_data
+        rb_flag = "RIGHT" in bank_data
+
+        left_bank_idx = (len(bank_data) - 1) - bank_data[::-1].index("LEFT") if lb_flag else 0
+        right_bank_idx = bank_data.index("RIGHT") if rb_flag else len(bank_data) - 1
+        return left_bank_idx, right_bank_idx
+
 
 class INTERPOLATE(Unit):
     """Class to hold and process INTERPOLATE unit type
floodmodeller_api/util.py CHANGED
@@ -59,6 +59,7 @@ def read_file(filepath: str | Path) -> FMFile:
 
     """
     from . import DAT, IED, IEF, INP, LF1, LF2, XML2D, ZZN
+    from .hydrology_plus import HydrologyPlusExport
 
     suffix_to_class = {
         ".ief": IEF,
@@ -69,6 +70,7 @@ def read_file(filepath: str | Path) -> FMFile:
         ".inp": INP,
         ".lf1": LF1,
         ".lf2": LF2,
+        ".csv": HydrologyPlusExport,
     }
     filepath = Path(filepath)
     api_class = suffix_to_class.get(filepath.suffix.lower())
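read_file now maps the .csv suffix to the new HydrologyPlusExport class from floodmodeller_api.hydrology_plus. A minimal sketch of the dispatch, assuming read_file is importable from the package root as in earlier releases and using one of the new test-data CSVs purely as an illustrative path:

from floodmodeller_api import read_file

# Any Hydrology+ export CSV; the ".csv" suffix now resolves to HydrologyPlusExport
hplus_export = read_file("floodmodeller_api/test/test_data/Baseline_unchecked.csv")
print(type(hplus_export).__name__)  # expected: HydrologyPlusExport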
floodmodeller_api/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.4.4.post1"
+__version__ = "0.5.0"
floodmodeller_api/xml2d.py CHANGED
@@ -24,6 +24,7 @@ from pathlib import Path
 from subprocess import DEVNULL, Popen
 from typing import Callable
 
+import requests
 from lxml import etree
 from tqdm import trange
 
@@ -57,7 +58,7 @@ class XML2D(FMFile):
         xml_filepath (str, optional): Full filepath to xml file.
 
     Output:
-        Initiates 'XML' class object
+        Initiates 'XML2D' class object
 
     Raises:
         TypeError: Raised if xml_filepath does not point to a .xml file
@@ -66,7 +67,8 @@ class XML2D(FMFile):
 
     _filetype: str = "XML2D"
     _suffix: str = ".xml"
-    _xsd_loc: str = "http://schema.floodmodeller.com/6.2/2d.xsd"
+    _xsd_loc: str = "https://schema.floodmodeller.com/7.1/2d.xsd"
+    _w3_schema: str = "{http://www.w3.org/2001/XMLSchema}"
     OLD_FILE = 5
     GOOD_EXIT_CODE = 100
 
@@ -88,8 +90,13 @@ class XML2D(FMFile):
             self._xmltree = etree.parse(io.StringIO(xml2d_template))
         else:
             self._xmltree = etree.parse(self._filepath)
-        self._xsd = etree.parse(self._xsd_loc)
-        self._xsdschema = etree.XMLSchema(self._xsd)
+        try:
+            xsd_bin = requests.get(self._xsd_loc).content
+            self._xsd = etree.parse(io.BytesIO(xsd_bin))
+            self._xsdschema = etree.XMLSchema(self._xsd)
+        except Exception:
+            self._xsd = etree.parse(Path(__file__).parent / "xsd_backup.xml")
+            self._xsdschema = etree.XMLSchema(self._xsd)
         self._get_multi_value_keys()
 
         self._create_dict()
@@ -175,18 +182,18 @@ class XML2D(FMFile):
         # find element in schema
         parent_name = parent.tag.replace(self._ns, "")
         schema_elem = self._xsd.find(
-            f".//{{http://www.w3.org/2001/XMLSchema}}*[@name='{parent_name}']",
+            f".//{self._w3_schema}*[@name='{parent_name}']",
         )
         if "type" in schema_elem.attrib:
             schema_elem = self._xsd.find(
-                f".//{{http://www.w3.org/2001/XMLSchema}}*[@name='{schema_elem.attrib['type']}']",
+                f".//{self._w3_schema}*[@name='{schema_elem.attrib['type']}']",
             )
         else:
-            schema_elem = schema_elem.find("{http://www.w3.org/2001/XMLSchema}complexType")
+            schema_elem = schema_elem.find(f"{self._w3_schema}complexType")
         if schema_elem is None:
             return parent.getchildren()
 
-        seq = schema_elem.find("{http://www.w3.org/2001/XMLSchema}sequence")
+        seq = schema_elem.find(f"{self._w3_schema}sequence")
         if seq is None:
             return parent.getchildren()
 
@@ -311,7 +318,7 @@ class XML2D(FMFile):
         # Check schema to see if we should use parent.set for attribute
         # or etree.subelement() and set text
         schema_elem = self._xsd.findall(
-            f".//{{http://www.w3.org/2001/XMLSchema}}*[@name='{add_key}']",
+            f".//{self._w3_schema}*[@name='{add_key}']",
         )
         if len(schema_elem) == 1:
             schema_elem = schema_elem[0]
@@ -319,14 +326,14 @@ class XML2D(FMFile):
             # This is just here for when there's multiple schema elements with same
             # name, e.g. 'frequency'
             parent_schema_elem = self._xsd.find(
-                f".//{{http://www.w3.org/2001/XMLSchema}}*[@name='{parent.tag.replace(self._ns, '')}']",
+                f".//{self._w3_schema}*[@name='{parent.tag.replace(self._ns, '')}']",
            )
            if "type" in parent_schema_elem.attrib:
                parent_schema_elem = self._xsd.find(
-                    f".//{{http://www.w3.org/2001/XMLSchema}}*[@name='{parent_schema_elem.attrib['type']}']",
+                    f".//{self._w3_schema}*[@name='{parent_schema_elem.attrib['type']}']",
                )
            schema_elem = parent_schema_elem.find(
-                f".//{{http://www.w3.org/2001/XMLSchema}}*[@name='{add_key}']",
+                f".//{self._w3_schema}*[@name='{add_key}']",
            )
 
         if schema_elem.tag.endswith("attribute"):
@@ -403,7 +410,7 @@ class XML2D(FMFile):
     def _get_multi_value_keys(self):
         self._multi_value_keys = []
         root = self._xsd.getroot()
-        for elem in root.findall(".//{http://www.w3.org/2001/XMLSchema}element"):
+        for elem in root.findall(f".//{self._w3_schema}element"):
             if elem.attrib.get("maxOccurs") not in (None, "0", "1"):
                 self._multi_value_keys.append(elem.attrib["name"])
         self._multi_value_keys = set(self._multi_value_keys)
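In this file the 2D schema is now fetched from the published 7.1 URL at runtime, falling back to the bundled xsd_backup.xml when the request fails (e.g. offline use). A standalone sketch of that load-with-fallback pattern, assuming only requests and lxml; the load_schema helper, the explicit timeout, and the backup path shown here are illustrative rather than part of the package:

import io
from pathlib import Path

import requests
from lxml import etree


def load_schema(xsd_url: str, backup_path: Path) -> etree.XMLSchema:
    """Fetch the published XSD, or fall back to a local copy if the request fails."""
    try:
        xsd_bin = requests.get(xsd_url, timeout=10).content
        return etree.XMLSchema(etree.parse(io.BytesIO(xsd_bin)))
    except Exception:
        # Offline, or the schema server is unreachable: use the bundled backup.
        # Inside the package this is Path(__file__).parent / "xsd_backup.xml".
        return etree.XMLSchema(etree.parse(str(backup_path)))


schema = load_schema(
    "https://schema.floodmodeller.com/7.1/2d.xsd",
    Path("floodmodeller_api") / "xsd_backup.xml",
)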