floodmodeller-api 0.5.2.post1__py3-none-any.whl → 0.5.3__py3-none-any.whl

This diff shows the contents of publicly released package versions as published to their respective public registries. It is provided for informational purposes only.
@@ -0,0 +1,20 @@
+from pathlib import Path
+
+import pytest
+
+from floodmodeller_api import read_file
+
+
+@pytest.mark.parametrize(
+    "file",
+    ["BRIDGE.DAT", "network.dat", "Domain1_W.xml", "T5.ief", "network.ied"],
+)
+def test_crlf_line_endings(test_workspace: Path, tmp_path: Path, file: str):
+    obj = read_file(test_workspace / file)
+    new_path = tmp_path / file
+    obj.save(new_path)
+    with open(new_path, "rb") as f:
+        contents = f.readlines()
+
+    # Check all line endings except last
+    assert all(line.endswith(b"\r\n") for line in contents[:-1])
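The new test round-trips each supported file type through `read_file` and `save()` and asserts that the written file keeps Windows-style CRLF line endings. A minimal sketch of the same check outside pytest, with hypothetical file paths:

```python
from pathlib import Path

from floodmodeller_api import read_file

src = Path("example_network.dat")  # hypothetical input file
dst = Path("example_network_copy.dat")

obj = read_file(src)  # returns the matching API class for the file suffix
obj.save(dst)

with open(dst, "rb") as f:
    lines = f.readlines()

# Every line except possibly the last should end with CRLF.
assert all(line.endswith(b"\r\n") for line in lines[:-1])
```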
@@ -11,28 +11,34 @@ from floodmodeller_api.logs import create_lf
 
 
 @pytest.fixture()
-def lf1_fp(test_workspace: Path) -> Path:
+def lf1_fp_simple(test_workspace: Path) -> Path:
     return Path(test_workspace, "ex3.lf1")
 
 
-def test_lf1_info_dict(lf1_fp: Path):
+@pytest.fixture()
+def lf1_fp_complex(test_workspace: Path) -> Path:
+    return Path(test_workspace, "lf_complex_ex.lf1")
+
+
+def test_lf1_info_dict(lf1_fp_simple: Path):
     """LF1: Check info dictionary"""
-    lf1 = LF1(lf1_fp)
+    lf1 = LF1(lf1_fp_simple)
     assert lf1.info["version"] == "5.0.0.7752"
     assert lf1.info["max_system_volume"] == 270549
     assert lf1.info["mass_balance_error"] == -0.03
     assert lf1.info["progress"] == 100
+    assert lf1.info["total_boundary_inflow"] == 5506290
 
 
-def test_lf1_report_progress(lf1_fp: Path):
+def test_lf1_report_progress(lf1_fp_simple: Path):
     """LF1: Check report_progress()"""
-    lf1 = LF1(lf1_fp)
+    lf1 = LF1(lf1_fp_simple)
     assert lf1.report_progress() == 100
 
 
-def test_lf1_to_dataframe(lf1_fp: Path):
+def test_lf1_to_dataframe(lf1_fp_simple: Path):
     """LF1: Check to_dataframe()"""
-    lf1 = LF1(lf1_fp)
+    lf1 = LF1(lf1_fp_simple)
     lf1_df = lf1.to_dataframe(variable="all")
 
     assert lf1_df.loc[lf1_df.index[0], "iter"] == 6
@@ -56,9 +62,9 @@ def test_lf1_to_dataframe(lf1_fp: Path):
         assert lf1_tuflow_df[col].isna().all()  # there is no tuflow in this lf1
 
 
-def test_lf1_from_ief(lf1_fp: Path, test_workspace: Path):
+def test_lf1_from_ief(lf1_fp_simple: Path, test_workspace: Path):
     """LF1: Check IEF.get_log()"""
-    lf1 = LF1(lf1_fp)
+    lf1 = LF1(lf1_fp_simple)
 
     ief_fp = Path(test_workspace, "ex3.ief")
     ief = IEF(ief_fp)
@@ -76,7 +82,7 @@ def test_log_file_unsupported(caplog):
     assert lf is None
     assert (
         caplog.text
-        == "WARNING root:lf.py:325 No progress bar as log file must have suffix lf1 or lf2. Simulation will continue as usual.\n"
+        == "WARNING root:lf.py:332 No progress bar as log file must have suffix lf1 or lf2. Simulation will continue as usual.\n"
     )
 
 
@@ -90,7 +96,7 @@ def test_log_file_timeout(caplog):
     assert lf is None
     assert (
         caplog.text
-        == "WARNING root:lf.py:325 No progress bar as log file is expected but not detected. Simulation will continue as usual.\n"
+        == "WARNING root:lf.py:332 No progress bar as log file is expected but not detected. Simulation will continue as usual.\n"
     )
 
 
@@ -106,7 +112,7 @@ def test_log_file_from_old_run(caplog):
     assert lf is None
     assert (
         caplog.text
-        == "WARNING root:lf.py:325 No progress bar as log file is from previous run. Simulation will continue as usual.\n"
+        == "WARNING root:lf.py:332 No progress bar as log file is from previous run. Simulation will continue as usual.\n"
     )
 
 
@@ -121,3 +127,42 @@ def test_log_file_found():
 
     assert lf is not None
     lf1.assert_called_once_with(lf_filepath)
+
+
+def test_lf1_info_dict_all_params_present(lf1_fp_complex: Path):
+    """LF1: Check info dictionary contains all params required"""
+    lf1 = LF1(lf1_fp_complex)
+    expected_keys = {
+        "version",
+        "number_of_1D_river_nodes",
+        "qtol",
+        "htol",
+        "start_time",
+        "end_time",
+        "ran_at",
+        "max_itr",
+        "min_itr",
+        "progress",
+        "EFT",
+        "ETR",
+        "simulation_time_elapsed",
+        "number_of_unconverged_timesteps",
+        "proportion_of_simulation_unconverged",
+        "mass_balance_calculated_every",
+        "initial_volume",
+        "final_volume",
+        "total_boundary_inflow",
+        "total_boundary_outflow",
+        "total_lat_link_inflow",
+        "total_lat_link_outflow",
+        "max_system_volume",
+        "max_volume_increase",
+        "max_boundary_inflow",
+        "max_boundary_outflow",
+        "net_volume_increase",
+        "net_inflow_volume",
+        "volume_discrepancy",
+        "mass_balance_error",
+        "mass_balance_error_2",
+    }
+    assert expected_keys == lf1.info.keys()
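For context on the expanded `info` checks: `LF1` parses a 1D simulation log file and exposes the parsed summary through its `info` dictionary, alongside time-series diagnostics via `to_dataframe()`. A hedged usage sketch with a hypothetical log path:

```python
from floodmodeller_api import LF1

lf1 = LF1("example_sim.lf1")  # hypothetical .lf1 log file

# Scalar summary values parsed from the log.
print(lf1.info["version"])
print(lf1.info["progress"])
print(lf1.info["total_boundary_inflow"])  # newly asserted in the tests above

# Full time-series diagnostics as a pandas DataFrame.
df = lf1.to_dataframe(variable="all")
```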
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+import pytest
+
+from floodmodeller_api.units._base import Unit
+
+
+class DummyUnit(Unit):
+    def __init__(self, unit_value: str):
+        self._unit = unit_value
+
+
+@pytest.mark.parametrize(
+    ("unit", "header", "expected_revision", "expected_comment"),
+    [
+        ("RESERVOIR", "RESERVOIR 45678 This is a comment", None, "45678 This is a comment"),
+        ("RESERVOIR", "RESERVOIR #revision#1 Mr Comment123", 1, "Mr Comment123"),
+        ("LATERAL", "LATERAL #revision#1", 1, ""),
+        ("RIVER", "RIVER look at this lovely RIVER", None, "look at this lovely RIVER"),
+    ],
+)
+def test_get_revision_and_comment(
+    unit: str,
+    header: str,
+    expected_revision: int | None,
+    expected_comment: str,
+):
+    dummy_unit = DummyUnit(unit)
+    revision, comment = dummy_unit._get_revision_and_comment(header)
+    assert revision == expected_revision
+    assert comment == expected_comment
+
+
+@pytest.mark.parametrize(
+    ("unit", "header", "remove_revision", "expected_result"),
+    [
+        ("RESERVOIR", "RESERVOIR 45678 This is a comment", True, "45678 This is a comment"),
+        ("RESERVOIR", "RESERVOIR #revision#1 Mr Comment123", True, "Mr Comment123"),
+        (
+            "LATERAL",
+            "LATERAL #revision#1 another #revision#1 tag",
+            True,
+            "another #revision#1 tag",
+        ),
+        (
+            "LATERAL",
+            "LATERAL #revision#1 another #revision#1 tag",
+            False,
+            "#revision#1 another #revision#1 tag",
+        ),
+        ("RIVER", "RIVER look at this lovely RIVER", False, "look at this lovely RIVER"),
+    ],
+)
+def test_remove_unit_name(unit: str, header: str, remove_revision: bool, expected_result: str):
+    dummy_unit = DummyUnit(unit)
+    result = dummy_unit._remove_unit_name(header, remove_revision=remove_revision)
+    assert result == expected_result
@@ -12,6 +12,7 @@ from .structures import (
     BRIDGE,
     CRUMP,
     FLAT_V_WEIR,
+    FLOODPLAIN,
     ORIFICE,
     OUTFALL,
     RNWEIR,
@@ -30,6 +31,6 @@ TConnectors: TypeAlias = JUNCTION | LATERAL
 TControls: TypeAlias = RESERVOIR
 TLosses: TypeAlias = BLOCKAGE | CULVERT
 TStructures: TypeAlias = (
-    BRIDGE | CRUMP | FLAT_V_WEIR | ORIFICE | OUTFALL | RNWEIR | SLUICE | SPILL | WEIR
+    BRIDGE | CRUMP | FLAT_V_WEIR | ORIFICE | OUTFALL | RNWEIR | SLUICE | SPILL | WEIR | FLOODPLAIN
 )
 TUnsupported: TypeAlias = UNSUPPORTED
@@ -18,7 +18,9 @@ from __future__ import annotations
 
 """ Holds the base unit class for all FM Units """
 
+import contextlib
 import logging
+import re
 from itertools import chain
 from typing import Any
 
@@ -26,7 +28,7 @@ import pandas as pd
 
 from ..diff import check_item_with_dataframe_equal
 from ..to_from_json import Jsonable
-from ._helpers import join_10_char, join_n_char_ljust, split_10_char, to_float, to_int, to_str
+from ._helpers import join_10_char, join_n_char_ljust, split_10_char, to_float, to_str
 
 
 class Unit(Jsonable):
@@ -138,6 +140,10 @@ class Unit(Jsonable):
 
         result = True
         diff = []
+        # Reset data attributes before checking equivalent
+        with contextlib.suppress(AttributeError):
+            _ = self.data, other.data
+
         result, diff = check_item_with_dataframe_equal(
             self.__dict__,
             other.__dict__,
@@ -257,7 +263,7 @@ class Unit(Jsonable):
     def _remove_unit_name(self, line: str, *, remove_revision: bool = False) -> str:
         line = line.replace(self._unit, "", 1)
         if remove_revision:
-            line = line.replace("#revision#", "", 1)
+            line = re.sub(r"^\s*#revision#\d+\s*", "", line)
         return line.strip()
 
     def _create_header(self, *, include_revision: bool = False) -> str:
@@ -272,10 +278,16 @@ class Unit(Jsonable):
         return line.split(" ")[0].strip()
 
     def _get_revision_and_comment(self, line: str) -> tuple[int | None, str]:
-        line_without_name = self._remove_unit_name(line, remove_revision=True)
-        revision = to_int(line_without_name[0], None) if line_without_name != "" else None
-        comment = line_without_name[1:].strip()
-        return revision, comment
+        unit = re.escape(self._unit)
+        pattern = rf"^{unit}(?:\s+#revision#(\d+))?(?:\s+(.*))?$"
+
+        match = re.match(pattern, line.strip())
+        if not match:
+            return None, ""
+
+        revision_str, comment = match.groups()
+        revision = int(revision_str) if revision_str else None
+        return revision, comment or ""
 
     def _enforce_dataframe(self, data: Any, columns: tuple[str, ...]) -> pd.DataFrame:
         return data if isinstance(data, pd.DataFrame) else pd.DataFrame([], columns=columns)
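The rewritten `_get_revision_and_comment` replaces positional slicing with a single anchored regex: the unit name, an optional `#revision#<n>` tag, then an optional free-text comment. A self-contained sketch of how that pattern behaves on the header formats covered by the new tests (the standalone function name here is illustrative, not the API):

```python
import re


def get_revision_and_comment(unit: str, line: str) -> tuple[int | None, str]:
    # Optional "#revision#<n>" after the unit name, then an optional comment.
    pattern = rf"^{re.escape(unit)}(?:\s+#revision#(\d+))?(?:\s+(.*))?$"
    match = re.match(pattern, line.strip())
    if not match:
        return None, ""
    revision_str, comment = match.groups()
    return (int(revision_str) if revision_str else None), (comment or "")


assert get_revision_and_comment("RESERVOIR", "RESERVOIR #revision#1 Mr Comment123") == (1, "Mr Comment123")
assert get_revision_and_comment("LATERAL", "LATERAL #revision#1") == (1, "")
assert get_revision_and_comment("RIVER", "RIVER look at this lovely RIVER") == (None, "look at this lovely RIVER")
```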
@@ -112,7 +112,7 @@ class RIVER(Unit):
 
         self._subtype = riv_block[1].split(" ")[0].strip()
         # Extends label line to be correct length before splitting to pick up blank labels
-        labels = split_n_char(f"{riv_block[2]:<{7*self._label_len}}", self._label_len)
+        labels = split_n_char(f"{riv_block[2]:<{7 * self._label_len}}", self._label_len)
 
         # Only supporting 'SECTION' subtype for now
         if self.subtype == "SECTION":
@@ -203,7 +203,7 @@ class RIVER(Unit):
             self.lat4,
         )
         # Manual so slope can have more sf
-        params = f'{self.dist_to_next:>10.3f}{"":>10}{self.slope:>10.6f}{self.density:>10.3f}'
+        params = f"{self.dist_to_next:>10.3f}{'':>10}{self.slope:>10.6f}{self.density:>10.3f}"
         self.nrows = len(self._data)
         riv_block = [header, self.subtype, labels, params, f"{self.nrows!s:>10}"]
 
@@ -373,7 +373,7 @@ class INTERPOLATE(Unit):
         """Function to read a given INTERPOLATE WEIR block and store data as class attributes"""
 
         # Extends label line to be correct length before splitting to pick up blank labels
-        labels = split_n_char(f"{block[1]:<{7*self._label_len}}", self._label_len)
+        labels = split_n_char(f"{block[1]:<{7 * self._label_len}}", self._label_len)
         self.name = labels[0]
         self.first_spill = labels[1]
         self.second_spill = labels[2]
@@ -468,7 +468,7 @@ class REPLICATE(Unit):
         """Function to read a given REPLICATE block and store data as class attributes"""
 
         # Extends label line to be correct length before splitting to pick up blank labels
-        labels = split_n_char(f"{block[1]:<{7*self._label_len}}", self._label_len)
+        labels = split_n_char(f"{block[1]:<{7 * self._label_len}}", self._label_len)
         self.name = labels[0]
         self.first_spill = labels[1]
         self.second_spill = labels[2]
@@ -1518,3 +1518,145 @@ class OUTFALL(Unit):
             "modular_limit": modular_limit,
         }.items():
             setattr(self, param, val)
+
+
+class FLOODPLAIN(Unit):
+    """Class to hold and process FLOODPLAIN unit type.
+
+    Args:
+        name (str, optional): FLOODPLAIN section name
+        comment (str, optional): Comment included in unit
+        ds_label (str, optional): Downstream node label
+        data (pandas.DataFrame, optional): DataFrame containing all the floodplain section data.
+            Columns are ``'X', 'Y', 'Mannings n', 'Easting', 'Northing'``
+        calibration_coefficient (float, optional): Weir coefficient (includes discharge, velocity and calibration coefficients)
+        modular_limit (float, optional): Ratio of upstream and downstream heads when switching between free and drowned mode
+        upstream_separation (float, optional): Distance from centre of upstream cell to section (m)
+        downstream_separation (float, optional): Distance from section to centre of downstream cell (m)
+        force_friction_flow (bool, optional): Force friction flow for all segments
+        ds_area_constraint (float, optional): Minimum value of downstream area (relative to upstream area) when Manning's equation applies. Typical value 0.1.
+
+    Returns:
+        FLOODPLAIN: Flood Modeller FLOODPLAIN Unit class object
+    """
+
+    _unit = "FLOODPLAIN"
+    _required_columns = (
+        "X",
+        "Y",
+        "Mannings n",
+        "Easting",
+        "Northing",
+    )
+
+    def _read(self, fp_block):
+        """Function to read a given FLOODPLAIN block and store data as class attributes."""
+
+        self._subtype = self._get_first_word(fp_block[1])
+        # Extends label line to be correct length before splitting to pick up blank labels
+        labels = split_n_char(f"{fp_block[2]:<{7 * self._label_len}}", self._label_len)
+        self.name = labels[0]
+        self.ds_label = labels[1]
+        self.comment = self._remove_unit_name(fp_block[0])
+
+        params = split_10_char(f"{fp_block[3]:<60}")
+        self.calibration_coefficient = to_float(params[0])
+        self.modular_limit = to_float(params[1])
+        self.upstream_separation = to_float(params[2])
+        self.downstream_separation = to_float(params[3])
+        self.force_friction_flow = params[4].upper() == "FRICTION"
+        self.ds_area_constraint = to_float(params[5])
+
+        self.nrows = int(split_10_char(fp_block[4])[0])
+        data_list = []
+        for row in fp_block[5:]:
+            row_split = split_10_char(f"{row:<50}")
+            x = to_float(row_split[0])  # chainage
+            y = to_float(row_split[1])  # elevation
+            n = to_float(row_split[2])  # Mannings
+            easting = to_float(row_split[3])  # easting
+            northing = to_float(row_split[4])  # northing
+
+            data_list.append(
+                [
+                    x,
+                    y,
+                    n,
+                    easting,
+                    northing,
+                ],
+            )
+        self._data = pd.DataFrame(
+            data_list,
+            columns=self._required_columns,
+        )
+
+    def _write(self):
+        """Function to write a valid FLOODPLAIN block"""
+
+        # Function to check the params are valid for FLOODPLAIN SECTION unit
+        _validate_unit(self)
+        header = self._create_header()
+        labels = join_n_char_ljust(self._label_len, self.name, self.ds_label)
+        # Manual so slope can have more sf
+        params = join_10_char(
+            self.calibration_coefficient,
+            self.modular_limit,
+            self.upstream_separation,
+            self.downstream_separation,
+            "FRICTION" if self.force_friction_flow else "",
+            self.ds_area_constraint,
+        )
+        self.nrows = len(self._data)
+        return [header, self.subtype, labels, params, *write_dataframe(self.nrows, self._data)]
+
+    def _create_from_blank(  # noqa: PLR0913
+        self,
+        name="new_floodplain",
+        comment="",
+        ds_label="",
+        data=None,
+        calibration_coefficient=1.0,
+        modular_limit=0.8,
+        upstream_separation=0.0,
+        downstream_separation=0.0,
+        force_friction_flow=False,
+        ds_area_constraint=0.1,
+    ):
+        # Initiate new FLOODPLAIN (currently hardcoding this as default)
+        self._subtype = "SECTION"
+
+        for param, val in {
+            "name": name,
+            "comment": comment,
+            "ds_label": ds_label,
+            "calibration_coefficient": calibration_coefficient,
+            "modular_limit": modular_limit,
+            "upstream_separation": upstream_separation,
+            "downstream_separation": downstream_separation,
+            "force_friction_flow": force_friction_flow,
+            "ds_area_constraint": ds_area_constraint,
+        }.items():
+            setattr(self, param, val)
+
+        self._data = self._enforce_dataframe(data, self._required_columns)
+
+    @property
+    def data(self) -> pd.DataFrame:
+        """Data table for the FLOODPLAIN cross section.
+
+        Returns:
+            pd.DataFrame: Pandas dataframe for the cross section data with columns: 'X', 'Y',
+                'Mannings n', 'Easting', 'Northing'
+        """
+        return self._data
+
+    @data.setter
+    def data(self, new_df: pd.DataFrame) -> None:
+        if not isinstance(new_df, pd.DataFrame):
+            msg = "The updated data table for a floodplain section must be a pandas DataFrame."
+            raise TypeError(msg)
+        if list(map(str.lower, new_df.columns)) != list(map(str.lower, self._required_columns)):
+            msg = f"The DataFrame must only contain columns: {self._required_columns}"
+            raise ValueError(msg)
+        self._data = new_df
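A hedged sketch of working with the new unit, assuming blank units can be constructed with keyword arguments as for the other section classes, and that FLOODPLAIN is re-exported from `floodmodeller_api.units` (as the import change below suggests); the column names match `_required_columns`:

```python
import pandas as pd

from floodmodeller_api.units import FLOODPLAIN  # assumed export path

section = pd.DataFrame(
    {
        "X": [0.0, 5.0, 10.0],  # chainage
        "Y": [10.0, 8.5, 10.0],  # elevation
        "Mannings n": [0.035, 0.035, 0.035],
        "Easting": [0.0, 0.0, 0.0],
        "Northing": [0.0, 0.0, 0.0],
    }
)

fp = FLOODPLAIN(name="FP001", ds_label="FP002", data=section)

# The data setter validates both the type and the column names.
fp.data = section            # OK
# fp.data = [1, 2, 3]        # would raise TypeError
# fp.data = section[["X"]]   # would raise ValueError (wrong columns)
```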
@@ -56,6 +56,7 @@ SUPPORTED_UNIT_TYPES: dict[str, SupportedUnitTypes] = {
     "JUNCTION": {"group": "connectors", "has_subtype": True},
     "LATERAL": {"group": "connectors", "has_subtype": False},
     "RESERVOIR": {"group": "controls", "has_subtype": False},
+    "FLOODPLAIN": {"group": "structures", "has_subtype": True},
 }
 
 UNSUPPORTED_UNIT_TYPES: dict[str, UnsupportedUnitTypes] = {
@@ -65,7 +66,6 @@ UNSUPPORTED_UNIT_TYPES: dict[str, UnsupportedUnitTypes] = {
     "FEHBDY": {"group": "boundaries", "has_subtype": False},  # RAINFALL RUNOFF METHOD boundary
     "FLOOD RELIEF": {"group": None, "has_subtype": True},  # found in dat file
     "FLOOD RELIEF ARCH": {"group": "structures", "has_subtype": True},  # found in FM help
-    "FLOODPLAIN": {"group": None, "has_subtype": True},  # floodplain section culvert
     "FRQSIM": {"group": "boundaries", "has_subtype": False},  # flood FReQuency SIMulation
     "FSRBDY": {"group": "boundaries", "has_subtype": False},  # FEH Rainfall Runoff Method
     "FSSR16BDY": {"group": "boundaries", "has_subtype": False},  # FSSR16 Method
@@ -661,4 +661,20 @@ parameter_options = {
         "type": "list-string-length",
         "max_length": 12,
     },
+    "upstream_separation": {
+        "type": "type-match",
+        "options": (float, int),
+    },
+    "downstream_separation": {
+        "type": "type-match",
+        "options": (float, int),
+    },
+    "force_friction_flow": {
+        "type": "type-match",
+        "options": (bool),
+    },
+    "ds_area_constraint": {
+        "type": "type-match",
+        "options": (float, int),
+    },
 }
@@ -1 +1 @@
-__version__ = "0.5.2.post1"
+__version__ = "0.5.3"
@@ -442,7 +442,7 @@ class XML2D(FMFile):
         # Update XML dict and tree
         self._read()
 
-    def save(self, filepath: str | Path | None):
+    def save(self, filepath: str | Path) -> None:
         """Saves the XML to the given location, if pointing to an existing file it will be overwritten.
         Once saved, the XML() class will continue working from the saved location, therefore any further calls to XML.update() will
         update in the latest saved location rather than the original source XML used to construct the class
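The tightened `save()` signature makes the target path mandatory rather than optionally None, matching the documented behaviour. A minimal usage sketch with hypothetical file names:

```python
from floodmodeller_api import XML2D

model = XML2D("Domain1_W.xml")  # hypothetical 2D model definition
model.save("Domain1_W_copy.xml")
# Per the docstring above, the instance now points at the new location,
# so later .update() calls write there rather than to the original file.
```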
@@ -1,6 +1,6 @@
-Metadata-Version: 2.2
+Metadata-Version: 2.4
 Name: floodmodeller_api
-Version: 0.5.2.post1
+Version: 0.5.3
 Summary: Extends the functionality of Flood Modeller to python users
 Author: Jacobs
 Author-email: joe.pierce@jacobs.com
@@ -24,6 +24,7 @@ Dynamic: author-email
 Dynamic: description
 Dynamic: description-content-type
 Dynamic: license
+Dynamic: license-file
 Dynamic: project-url
 Dynamic: requires-dist
 Dynamic: summary
@@ -60,7 +61,8 @@ You can install the floodmodeller_api package from PyPI with the following comma
 pip install floodmodeller-api
 ```
 
-Python 3.10 or greater is required.
+Flood Modeller API is regularly tested against all versions of Python from 3.10 onwards. Although it may work with older versions of Python, we recommend using at least 3.10 and ideally the latest
+stable release.
 
 Once you have installed floodmodeller_api to your python environment, you can import the package with: