floodmodeller-api 0.5.3.post2__py3-none-any.whl → 0.5.5.post1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. floodmodeller_api/dat.py +41 -4
  2. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +1 -2
  3. floodmodeller_api/ied.py +1 -1
  4. floodmodeller_api/test/test_dat.py +16 -1
  5. floodmodeller_api/test/test_data/River_Bridge.dat +1453 -0
  6. floodmodeller_api/test/test_data/River_Bridge.gxy +221 -0
  7. floodmodeller_api/test/test_data/River_Bridge_DAT_expected.json +27273 -0
  8. floodmodeller_api/test/test_data/River_Bridge_no_gxy.dat +1453 -0
  9. floodmodeller_api/test/test_data/River_Bridge_no_gxy_DAT_expected.json +26853 -0
  10. floodmodeller_api/test/test_gxy.py +98 -0
  11. floodmodeller_api/test/test_json.py +37 -2
  12. floodmodeller_api/test/test_unit.py +12 -0
  13. floodmodeller_api/to_from_json.py +16 -2
  14. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +8 -8
  15. floodmodeller_api/units/_base.py +30 -0
  16. floodmodeller_api/units/boundaries.py +4 -1
  17. floodmodeller_api/units/conduits.py +1 -1
  18. floodmodeller_api/units/losses.py +2 -2
  19. floodmodeller_api/units/sections.py +36 -0
  20. floodmodeller_api/units/structures.py +60 -13
  21. floodmodeller_api/units/unsupported.py +2 -2
  22. floodmodeller_api/validation/validation.py +6 -6
  23. floodmodeller_api/version.py +1 -1
  24. {floodmodeller_api-0.5.3.post2.dist-info → floodmodeller_api-0.5.5.post1.dist-info}/METADATA +1 -1
  25. {floodmodeller_api-0.5.3.post2.dist-info → floodmodeller_api-0.5.5.post1.dist-info}/RECORD +29 -23
  26. {floodmodeller_api-0.5.3.post2.dist-info → floodmodeller_api-0.5.5.post1.dist-info}/WHEEL +0 -0
  27. {floodmodeller_api-0.5.3.post2.dist-info → floodmodeller_api-0.5.5.post1.dist-info}/entry_points.txt +0 -0
  28. {floodmodeller_api-0.5.3.post2.dist-info → floodmodeller_api-0.5.5.post1.dist-info}/licenses/LICENSE.txt +0 -0
  29. {floodmodeller_api-0.5.3.post2.dist-info → floodmodeller_api-0.5.5.post1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,98 @@
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+
5
+ import pytest
6
+
7
+ import floodmodeller_api.units
8
+ from floodmodeller_api import DAT
9
+ from floodmodeller_api.units import (
10
+ FLOODPLAIN,
11
+ INTERPOLATE,
12
+ JUNCTION,
13
+ QTBDY,
14
+ REPLICATE,
15
+ RESERVOIR,
16
+ RIVER,
17
+ SPILL,
18
+ )
19
+
20
+
21
+ # this would be a fixture but doesn't work when used in a parameterised test.
22
+ def blank_with_location(unit_class, *args, **kwargs):
23
+ unit = unit_class(*args, **kwargs)
24
+ unit._location = (461193.10, 339088.74)
25
+ return unit
26
+
27
+
28
+ @pytest.mark.parametrize(
29
+ ("unit", "expected_outcome"),
30
+ [
31
+ (RIVER(), None),
32
+ (QTBDY(), None),
33
+ (INTERPOLATE(), None),
34
+ (INTERPOLATE(easting=123.4, northing=987.6), (123.4, 987.6)),
35
+ (REPLICATE(), None),
36
+ (REPLICATE(easting=123.4, northing=987.6), (123.4, 987.6)),
37
+ (RESERVOIR(), None),
38
+ (RESERVOIR(easting=123.4, northing=987.6), (123.4, 987.6)),
39
+ (SPILL(), None),
40
+ (FLOODPLAIN(), None),
41
+ (blank_with_location(QTBDY), (461193.10, 339088.74)),
42
+ (blank_with_location(RIVER), (461193.10, 339088.74)),
43
+ (blank_with_location(INTERPOLATE), (461193.10, 339088.74)),
44
+ (blank_with_location(INTERPOLATE, easting=123.4, northing=987.6), (461193.10, 339088.74)),
45
+ (blank_with_location(SPILL), (461193.10, 339088.74)),
46
+ (blank_with_location(FLOODPLAIN), (461193.10, 339088.74)),
47
+ ],
48
+ )
49
+ def test_unit_location(unit, expected_outcome):
50
+ assert unit.location == expected_outcome
51
+
52
+
53
+ def get_supported_unit_classes():
54
+ all_unit_classes = []
55
+ for unit_type, attributes in floodmodeller_api.units.SUPPORTED_UNIT_TYPES.items():
56
+ if attributes["group"] not in ("other", "comments"):
57
+ unit_type_safe = unit_type.replace(" ", "_").replace("-", "_")
58
+ # Borrowed replacing idea from .dat
59
+ unit_class = getattr(floodmodeller_api.units, unit_type_safe)
60
+ all_unit_classes.append(unit_class)
61
+ return all_unit_classes
62
+
63
+
64
+ SUPPORTED_UNIT_CLASSES = get_supported_unit_classes()
65
+
66
+
67
+ @pytest.mark.parametrize("unit_class", SUPPORTED_UNIT_CLASSES)
68
+ def test_setting_location(unit_class):
69
+ # first check that we get the not implemented error, then check that the location is still unaffected.
70
+ # this test should be updated when location is read/write capable.
71
+ try:
72
+ # Junction units cannot be created from blank without at least one label.
73
+ unit = unit_class(labels=["label1"]) if unit_class == JUNCTION else unit_class()
74
+ except NotImplementedError as error:
75
+ pytest.skip(f"Creating unit {unit_class=} from blank not supported, skipping...\n{error=}")
76
+
77
+ with pytest.raises(NotImplementedError):
78
+ unit.location = (461382.54, 339188.26)
79
+
80
+ assert unit.location is None
81
+ assert unit._location is None
82
+
83
+
84
+ @pytest.mark.parametrize(
85
+ ("dat_name", "group", "label", "expected_outcome"),
86
+ [
87
+ ("EX1.DAT", "sections", "S4", (-38203.94169253, 153846.153846154)),
88
+ ("River_Bridge_no_gxy.dat", "sections", "M029", (385029.200, 242717.100)),
89
+ ("River_Bridge_no_gxy.dat", "sections", "M030", (384689.300, 242345.700)),
90
+ ("River_Bridge_no_gxy.dat", "sections", "M031", (384545.000, 241937.000)),
91
+ ("River_Bridge_no_gxy.dat", "structures", "M047spU", (386710.9, 236857.85)),
92
+ ],
93
+ )
94
+ def test_unit_from_dat(test_workspace, dat_name, group, label, expected_outcome):
95
+ dat_path = Path(test_workspace, dat_name)
96
+ dat = DAT(dat_path)
97
+ unit = getattr(dat, group)[label]
98
+ assert unit.location == expected_outcome
@@ -8,6 +8,13 @@ import pytest
8
8
 
9
9
  from floodmodeller_api import DAT, IED, IEF, INP, XML2D
10
10
  from floodmodeller_api.to_from_json import is_jsonable
11
+ from floodmodeller_api.units import (
12
+ FLOODPLAIN,
13
+ INTERPOLATE,
14
+ QTBDY,
15
+ RIVER,
16
+ SPILL,
17
+ )
11
18
  from floodmodeller_api.util import read_file
12
19
 
13
20
  if TYPE_CHECKING:
@@ -65,6 +72,14 @@ def parameterised_objs_and_expected(test_workspace) -> list[tuple[FMFile, Path]]
65
72
  (IEF(test_workspace / "ex3.ief"), test_workspace / "EX3_IEF_expected.json"),
66
73
  (XML2D(test_workspace / "Domain1_Q.xml"), test_workspace / "Domain1_Q_xml_expected.json"),
67
74
  (XML2D(test_workspace / "Linked1D2D.xml"), test_workspace / "Linked1D2D_xml_expected.json"),
75
+ (
76
+ DAT(test_workspace / "River_Bridge.dat"),
77
+ test_workspace / "River_Bridge_DAT_expected.json",
78
+ ),
79
+ (
80
+ DAT(test_workspace / "River_Bridge_no_gxy.dat"),
81
+ test_workspace / "River_Bridge_no_gxy_DAT_expected.json",
82
+ ),
68
83
  ]
69
84
 
70
85
 
@@ -80,7 +95,7 @@ def test_to_json_matches_expected(parameterised_objs_and_expected: list[tuple[FM
80
95
  json_dict_from_file = json.load(file)["Object Attributes"]
81
96
 
82
97
  # keys to ignore when testing for equivalence
83
- keys_to_remove = ["_filepath", "file", "_log_path"]
98
+ keys_to_remove = ["_filepath", "file", "_log_path", "_gxy_filepath"]
84
99
  for key in keys_to_remove:
85
100
  json_dict_from_obj.pop(key, None)
86
101
  json_dict_from_file.pop(key, None)
@@ -104,11 +119,31 @@ def test_obj_reproduces_from_json_for_all_test_api_files(
104
119
  file_extension_glob,
105
120
  ):
106
121
  """JSON: To test the from_json function, It should produce the same dat file from a json file"""
122
+ fail_list = []
107
123
  for file in Path(test_workspace).glob(file_extension_glob):
108
124
  if file.name.startswith("duplicate_unit_test"):
109
125
  # Skipping as invalid DAT (duplicate units)
110
126
  continue
111
- assert api_class(file) == api_class.from_json(api_class(file).to_json())
127
+
128
+ if api_class(file) != api_class.from_json(api_class(file).to_json()):
129
+ fail_list.append(str(file))
130
+ failures = "\n".join(fail_list)
131
+ assert len(fail_list) == 0, f"The following files did not reproduce:\n{failures}"
132
+
133
+
134
+ @pytest.mark.parametrize(
135
+ "unit",
136
+ [
137
+ RIVER(),
138
+ QTBDY(),
139
+ INTERPOLATE(),
140
+ INTERPOLATE(easting=123.4, northing=987.6),
141
+ SPILL(),
142
+ FLOODPLAIN(),
143
+ ],
144
+ )
145
+ def test_obj_reproduces_from_json_for_units(unit):
146
+ assert unit == unit.from_json(unit.to_json())
112
147
 
113
148
 
114
149
  def test_is_jsonable_with_jsonable_object():
@@ -1,7 +1,9 @@
1
1
  from __future__ import annotations
2
2
 
3
+ import pandas as pd
3
4
  import pytest
4
5
 
6
+ from floodmodeller_api.units import QTBDY
5
7
  from floodmodeller_api.units._base import Unit # update this import path to match your repo
6
8
 
7
9
 
@@ -55,3 +57,13 @@ def test_remove_unit_name(unit: str, header: str, remove_revision: bool, expecte
55
57
  dummy_unit = DummyUnit(unit)
56
58
  result = dummy_unit._remove_unit_name(header, remove_revision=remove_revision)
57
59
  assert result == expected_result
60
+
61
+
62
+ def test_partially_defined_unit():
63
+ actual = QTBDY(["QTBDY comment", "test", "1", "0"]).data
64
+ expected = pd.Series(
65
+ [0],
66
+ index=pd.Index([0], name="Time"),
67
+ name="Flow",
68
+ )
69
+ pd.testing.assert_series_equal(expected, actual)
@@ -82,6 +82,11 @@ def recursive_to_json(obj: Any, is_top_level: bool = True) -> Any: # noqa: PLR0
82
82
  from .units._base import Unit
83
83
  from .urban1d._base import UrbanSubsection, UrbanUnit
84
84
 
85
+ if isinstance(obj, tuple):
86
+ return {
87
+ "python_tuple": [recursive_to_json(item, is_top_level=False) for item in obj],
88
+ }
89
+
85
90
  if is_jsonable(obj):
86
91
  return obj
87
92
 
@@ -144,7 +149,7 @@ def from_json(obj: str | dict) -> dict:
144
149
  return recursive_from_json(obj_dict)
145
150
 
146
151
 
147
- def recursive_from_json(obj: dict | Any) -> Any:
152
+ def recursive_from_json(obj: dict | Any) -> Any: # noqa: C901
148
153
  """
149
154
  Function to undertake a recursion through the different elements of the JSON object
150
155
 
@@ -173,7 +178,16 @@ def recursive_from_json(obj: dict | Any) -> Any:
173
178
  return reconstructed_sr
174
179
 
175
180
  if "python_set" in obj:
176
- return set(obj["python_set"])
181
+ return {
182
+ recursive_from_json(item) if isinstance(item, dict) else item
183
+ for item in obj["python_set"]
184
+ }
185
+
186
+ if "python_tuple" in obj:
187
+ return tuple(
188
+ recursive_from_json(item) if isinstance(item, dict) else item
189
+ for item in obj["python_tuple"]
190
+ )
177
191
 
178
192
  for key, value in obj.items():
179
193
  if isinstance(value, dict):
@@ -437,7 +437,7 @@ class StructureLogBuilder:
437
437
  height = opening["opening_height"]
438
438
  width = opening["width"]
439
439
 
440
- text += f"Opening {n+1}: h: {height:.2f} x w: {width:.2f} "
440
+ text += f"Opening {n + 1}: h: {height:.2f} x w: {width:.2f} "
441
441
 
442
442
  return text.rstrip()
443
443
 
@@ -480,24 +480,24 @@ class StructureLogBuilder:
480
480
  culvert_loss = ""
481
481
  match unit_dict["subtype"]:
482
482
  case "CIRCULAR":
483
- text += f'dia: {unit_dict["dimensions"]["diameter"]:.2f} x l: {unit_dict["conduit_data"]["length"]:.2f}'
483
+ text += f"dia: {unit_dict['dimensions']['diameter']:.2f} x l: {unit_dict['conduit_data']['length']:.2f}"
484
484
  case "SPRUNGARCH" | "SPRUNG":
485
- text += f'(Springing: {unit_dict["dimensions"]["height_springing"]:.2f}, Crown: {unit_dict["dimensions"]["height_crown"]:.2f}) x w: {unit_dict["dimensions"]["width"]:.2f} x l: {unit_dict["conduit_data"]["length"]:.2f}'
485
+ text += f"(Springing: {unit_dict['dimensions']['height_springing']:.2f}, Crown: {unit_dict['dimensions']['height_crown']:.2f}) x w: {unit_dict['dimensions']['width']:.2f} x l: {unit_dict['conduit_data']['length']:.2f}"
486
486
  case "RECTANGULAR":
487
- text += f'h: {unit_dict["dimensions"]["height"]:.2f} x w: {unit_dict["dimensions"]["width"]:.2f} x l: {unit_dict["conduit_data"]["length"]:.2f}'
487
+ text += f"h: {unit_dict['dimensions']['height']:.2f} x w: {unit_dict['dimensions']['width']:.2f} x l: {unit_dict['conduit_data']['length']:.2f}"
488
488
  case "SECTION":
489
- text += f'h: {unit_dict["dimensions"]["height"]:.2f} x w: {unit_dict["dimensions"]["width"]:.2f} x l: {unit_dict["conduit_data"]["length"]:.2f}'
489
+ text += f"h: {unit_dict['dimensions']['height']:.2f} x w: {unit_dict['dimensions']['width']:.2f} x l: {unit_dict['conduit_data']['length']:.2f}"
490
490
  case _:
491
491
  return "", ""
492
492
 
493
493
  if "total_length" in unit_dict["conduit_data"]:
494
- text += f' (Total conduit length: {unit_dict["conduit_data"]["total_length"]:.2f})'
494
+ text += f" (Total conduit length: {unit_dict['conduit_data']['total_length']:.2f})"
495
495
 
496
496
  if "inlet" in unit_dict["conduit_data"]:
497
- culvert_loss += f'Ki: {unit_dict["conduit_data"]["inlet"]}, '
497
+ culvert_loss += f"Ki: {unit_dict['conduit_data']['inlet']}, "
498
498
 
499
499
  if "outlet" in unit_dict["conduit_data"]:
500
- culvert_loss += f'Ko: {unit_dict["conduit_data"]["outlet"]}, '
500
+ culvert_loss += f"Ko: {unit_dict['conduit_data']['outlet']}, "
501
501
 
502
502
  culvert_loss = culvert_loss.rstrip(", ")
503
503
 
@@ -35,6 +35,7 @@ class Unit(Jsonable):
35
35
  _unit: str
36
36
  _subtype: str | None = None
37
37
  _name: str | None = None
38
+ _location: tuple[float, float] | None = None
38
39
 
39
40
  def __init__(self, unit_block=None, n=12, from_json: bool = False, **kwargs):
40
41
  if from_json:
@@ -104,6 +105,35 @@ class Unit(Jsonable):
104
105
  msg = "You cannot change the subtype of a unit once it has been instantiated"
105
106
  raise ValueError(msg)
106
107
 
108
+ @property
109
+ def location(self) -> tuple[float, float] | None:
110
+ # gxy data (_location) written upon instantiation when opening DAT.
111
+ # default priority is as follows:
112
+ # 1. gxy data if not None
113
+ # 2. easting and northing attributes if the unit has them (Interpolates, replicates and reservoirs)
114
+ # 3. None
115
+ if self._location is not None:
116
+ return self._location
117
+
118
+ if hasattr(self, "easting") and hasattr(self, "northing"):
119
+ location = (self.easting, self.northing)
120
+ if location != (0, 0):
121
+ return location
122
+
123
+ return None
124
+
125
+ @location.setter
126
+ def location(self, new_value):
127
+ msg = "Currently unit location is read-only."
128
+ raise NotImplementedError(msg)
129
+
130
+ def set_cached_location_from_gxy(self, location):
131
+ """Used by DAT to set the _location attribute when reading a gxy.
132
+
133
+ This is not a setter, and will not make .gxy modifications.
134
+ """
135
+ self._location = location
136
+
107
137
  def __repr__(self):
108
138
  if self._subtype is None:
109
139
  return f"<floodmodeller_api Unit Class: {self._unit}(name={self._name})>"
@@ -106,7 +106,8 @@ class QTBDY(Unit):
106
106
  if self.timeunit == "DATES"
107
107
  else to_data_list(qtbdy_block[3:])
108
108
  )
109
-
109
+ if data_list == [[0]]:
110
+ data_list = [[0, 0]]
110
111
  self.data = pd.DataFrame(data_list, columns=["Flow", "Time"])
111
112
  self.data = self.data.set_index("Time")
112
113
  self.data = self.data["Flow"] # Convert to series
@@ -200,6 +201,8 @@ class HTBDY(Unit):
200
201
  if self.timeunit == "DATES"
201
202
  else to_data_list(htbdy_block[3:])
202
203
  )
204
+ if data_list == [[0]]:
205
+ data_list = [[0, 0]]
203
206
 
204
207
  self.data = pd.DataFrame(data_list, columns=["Stage", "Time"])
205
208
  self.data = self.data.set_index("Time")
@@ -183,7 +183,7 @@ class CONDUIT(Unit):
183
183
  """Function to read a given CONDUIT block and store data as class attributes"""
184
184
  self._subtype = self._get_first_word(c_block[1])
185
185
  # Extends label line to be correct length before splitting to pick up blank labels
186
- labels = split_n_char(f"{c_block[2]:<{2*self._label_len}}", self._label_len)
186
+ labels = split_n_char(f"{c_block[2]:<{2 * self._label_len}}", self._label_len)
187
187
  self.name = labels[0]
188
188
  self.spill = labels[1]
189
189
  self.comment = self._remove_unit_name(c_block[0])
@@ -79,7 +79,7 @@ class CULVERT(Unit):
79
79
  # Extract common attributes
80
80
  self._subtype = self._get_first_word(block[1])
81
81
  self.comment = self._remove_unit_name(block[0])
82
- labels = split_n_char(f"{block[2]:<{4*self._label_len}}", self._label_len)
82
+ labels = split_n_char(f"{block[2]:<{4 * self._label_len}}", self._label_len)
83
83
  self.name = labels[0]
84
84
  self.ds_label = labels[1]
85
85
  self.us_remote_label = labels[2]
@@ -195,7 +195,7 @@ class BLOCKAGE(Unit):
195
195
  self._revision, self.comment = self._get_revision_and_comment(block[0])
196
196
 
197
197
  # Extract labels
198
- labels = split_n_char(f"{block[1]:<{5*self._label_len}}", self._label_len)
198
+ labels = split_n_char(f"{block[1]:<{5 * self._label_len}}", self._label_len)
199
199
  self.name = labels[0]
200
200
  self.ds_label = labels[1]
201
201
  self.us_reference_label = labels[2]
@@ -235,6 +235,42 @@ class RIVER(Unit):
235
235
 
236
236
  return self._raw_block
237
237
 
238
+ @property
239
+ def location(self) -> tuple[float, float] | None:
240
+ # for RIVER units, source priority is as follows:
241
+ # 1. GXY location if defined
242
+ # 2. BED marker location if not (0,0)
243
+ # 3. Y-min location if not (0,0)
244
+ # 4. None
245
+ if self._location is not None:
246
+ return self._location
247
+
248
+ try:
249
+ bed_rows = self.active_data["Marker"] == "BED"
250
+ bed_points = self.active_data.loc[bed_rows]
251
+ first_bed = bed_points[["Easting", "Northing"]].iloc[0]
252
+ location = (float(first_bed["Easting"]), float(first_bed["Northing"]))
253
+ if location != (0, 0):
254
+ return location
255
+ except (ValueError, IndexError):
256
+ pass
257
+
258
+ try:
259
+ min_idx = self.active_data.Y.idxmin()
260
+ min_row = self.active_data.loc[min_idx]
261
+ location = (float(min_row["Easting"]), float(min_row["Northing"]))
262
+ if location != (0, 0):
263
+ return location
264
+ except (ValueError, IndexError):
265
+ pass
266
+
267
+ return None
268
+
269
+ @location.setter
270
+ def location(self, new_value: tuple[float, float] | None) -> None:
271
+ msg = "Currently unit location is read-only."
272
+ raise NotImplementedError(msg)
273
+
238
274
  @property
239
275
  def data(self) -> pd.DataFrame:
240
276
  """Data table for the river cross section.
@@ -47,6 +47,15 @@ from ._helpers import (
47
47
  )
48
48
 
49
49
 
50
+ def _get_median_coordinate(data):
51
+ # trim rows that have invalid coordinates (0,0)
52
+ data = data[(data["Easting"] != 0) | (data["Northing"] != 0)]
53
+ median_coords = data[["Easting", "Northing"]].median()
54
+ if median_coords.isna().any():
55
+ return None
56
+ return (float(median_coords["Easting"]), float(median_coords["Northing"]))
57
+
58
+
50
59
  class BRIDGE(Unit):
51
60
  """Class to hold and process BRIDGE unit type. The Bridge class supports the three main bridge sub-types in
52
61
  Flood Modeller: Arch, USBPR1978 and Pierloss. Each of these sub-types forms a unique instance of the class
@@ -163,7 +172,7 @@ class BRIDGE(Unit):
163
172
  self.comment = self._remove_unit_name(br_block[0])
164
173
  self._subtype = self._get_first_word(br_block[1])
165
174
  # Extends label line to be correct length before splitting to pick up blank labels
166
- labels = split_n_char(f"{br_block[2]:<{4*self._label_len}}", self._label_len)
175
+ labels = split_n_char(f"{br_block[2]:<{4 * self._label_len}}", self._label_len)
167
176
  self.name = labels[0]
168
177
  self.ds_label = labels[1]
169
178
  self.us_remote_label = labels[2]
@@ -374,7 +383,7 @@ class BRIDGE(Unit):
374
383
  )
375
384
  if self.specify_piers:
376
385
  if self.pier_use_calibration_coeff:
377
- pier_params = f'{self.npiers:>10}{"COEFF":<10}{"":>10}{self.pier_calibration_coeff:>10.3f}'
386
+ pier_params = f"{self.npiers:>10}{'COEFF':<10}{'':>10}{self.pier_calibration_coeff:>10.3f}"
378
387
  else:
379
388
  pier_params = f"{self.npiers:>10}{self.pier_shape:<10}{self.pier_faces:<10}"
380
389
  else:
@@ -589,7 +598,7 @@ class SLUICE(Unit):
589
598
  self._subtype = self._get_first_word(block[1])
590
599
 
591
600
  # Extends label line to be correct length before splitting to pick up blank labels
592
- labels = split_n_char(f"{block[2]:<{3*self._label_len}}", self._label_len)
601
+ labels = split_n_char(f"{block[2]:<{3 * self._label_len}}", self._label_len)
593
602
  self.name = labels[0]
594
603
  self.ds_label = labels[1]
595
604
  self.remote_label = labels[2]
@@ -675,8 +684,8 @@ class SLUICE(Unit):
675
684
  self.weir_length,
676
685
  )
677
686
  if self.subtype == "RADIAL":
678
- params1 += f'{"DEGREES":<10}' if self.use_degrees else f'{"":<10}'
679
- params1 += "FREESLUICE" if self.allow_free_flow_under else f'{"":<10}'
687
+ params1 += f"{'DEGREES':<10}" if self.use_degrees else f"{'':<10}"
688
+ params1 += "FREESLUICE" if self.allow_free_flow_under else f"{'':<10}"
680
689
 
681
690
  # Second parameter line
682
691
  params2 = join_10_char(
@@ -726,7 +735,7 @@ class SLUICE(Unit):
726
735
  # ADD GATES
727
736
  block.append(
728
737
  join_10_char(
729
- f'{"LOGICAL":<10}',
738
+ f"{'LOGICAL':<10}",
730
739
  self.max_movement_rate,
731
740
  self.max_setting,
732
741
  self.min_setting,
@@ -827,7 +836,7 @@ class ORIFICE(Unit):
827
836
  self.flapped = self.subtype == "FLAPPED"
828
837
 
829
838
  # Extends label line to be correct length before splitting to pick up blank labels
830
- labels = split_n_char(f"{block[2]:<{2*self._label_len}}", self._label_len)
839
+ labels = split_n_char(f"{block[2]:<{2 * self._label_len}}", self._label_len)
831
840
  self.name = labels[0]
832
841
  self.ds_label = labels[1]
833
842
  self.comment = self._remove_unit_name(block[0])
@@ -928,7 +937,7 @@ class SPILL(Unit):
928
937
  def _read(self, block):
929
938
  """Function to read a given SPILL block and store data as class attributes"""
930
939
  # Extends label line to be correct length before splitting to pick up blank labels
931
- labels = split_n_char(f"{block[1]:<{2*self._label_len}}", self._label_len)
940
+ labels = split_n_char(f"{block[1]:<{2 * self._label_len}}", self._label_len)
932
941
  self.name = labels[0]
933
942
  self.ds_label = labels[1]
934
943
  self.comment = self._remove_unit_name(block[0])
@@ -984,6 +993,25 @@ class SPILL(Unit):
984
993
  else pd.DataFrame([[0.0, 0.0, 0.0, 0.0]], columns=["X", "Y", "Easting", "Northing"])
985
994
  )
986
995
 
996
+ @property
997
+ def location(self) -> tuple[float, float] | None:
998
+ # for SPILL units, source priority is as follows:
999
+ # 1. GXY location if defined
1000
+ # 2. median location if not (0,0)
1001
+ # 3. None
1002
+ if self._location is not None:
1003
+ return self._location
1004
+
1005
+ try:
1006
+ return _get_median_coordinate(self.data)
1007
+ except (ValueError, IndexError):
1008
+ return None
1009
+
1010
+ @location.setter
1011
+ def location(self, new_value: tuple[float, float] | None) -> None:
1012
+ msg = "Currently unit location is read-only."
1013
+ raise NotImplementedError(msg)
1014
+
987
1015
 
988
1016
  class RNWEIR(Unit):
989
1017
  """Class to hold and process RNWEIR unit type
@@ -1009,7 +1037,7 @@ class RNWEIR(Unit):
1009
1037
  def _read(self, block):
1010
1038
  """Function to read a given RNWEIR block and store data as class attributes"""
1011
1039
  # Extends label line to be correct length before splitting to pick up blank labels
1012
- labels = split_n_char(f"{block[1]:<{2*self._label_len}}", self._label_len)
1040
+ labels = split_n_char(f"{block[1]:<{2 * self._label_len}}", self._label_len)
1013
1041
  self.name = labels[0]
1014
1042
  self.ds_label = labels[1]
1015
1043
  self.comment = self._remove_unit_name(block[0])
@@ -1110,7 +1138,7 @@ class WEIR(Unit):
1110
1138
  def _read(self, block):
1111
1139
  """Function to read a given WEIR block and store data as class attributes"""
1112
1140
  # Extends label line to be correct length before splitting to pick up blank labels
1113
- labels = split_n_char(f"{block[1]:<{2*self._label_len}}", self._label_len)
1141
+ labels = split_n_char(f"{block[1]:<{2 * self._label_len}}", self._label_len)
1114
1142
  self.name = labels[0]
1115
1143
  self.ds_label = labels[1]
1116
1144
  self.comment = self._remove_unit_name(block[0])
@@ -1202,7 +1230,7 @@ class CRUMP(Unit):
1202
1230
  def _read(self, block):
1203
1231
  """Function to read a given CRUMP block and store data as class attributes"""
1204
1232
  # Extends label line to be correct length before splitting to pick up blank labels
1205
- labels = split_n_char(f"{block[1]:<{4*self._label_len}}", self._label_len)
1233
+ labels = split_n_char(f"{block[1]:<{4 * self._label_len}}", self._label_len)
1206
1234
  self.name = labels[0]
1207
1235
  self.ds_label = labels[1]
1208
1236
  self.us_remote_label = labels[2]
@@ -1311,7 +1339,7 @@ class FLAT_V_WEIR(Unit): # noqa: N801
1311
1339
  def _read(self, block):
1312
1340
  """Function to read a given FLAT-V WEIR block and store data as class attributes"""
1313
1341
  # Extends label line to be correct length before splitting to pick up blank labels
1314
- labels = split_n_char(f"{block[1]:<{4*self._label_len}}", self._label_len)
1342
+ labels = split_n_char(f"{block[1]:<{4 * self._label_len}}", self._label_len)
1315
1343
  self.name = labels[0]
1316
1344
  self.ds_label = labels[1]
1317
1345
  self.us_remote_label = labels[2]
@@ -1440,7 +1468,7 @@ class OUTFALL(Unit):
1440
1468
  self.flapped = self.subtype == "FLAPPED"
1441
1469
 
1442
1470
  # Extends label line to be correct length before splitting to pick up blank labels
1443
- labels = split_n_char(f"{block[2]:<{2*self._label_len}}", self._label_len)
1471
+ labels = split_n_char(f"{block[2]:<{2 * self._label_len}}", self._label_len)
1444
1472
  self.name = labels[0]
1445
1473
  self.ds_label = labels[1]
1446
1474
  self.comment = self._remove_unit_name(block[0])
@@ -1660,3 +1688,22 @@ class FLOODPLAIN(Unit):
1660
1688
  msg = f"The DataFrame must only contain columns: {self._required_columns}"
1661
1689
  raise ValueError(msg)
1662
1690
  self._data = new_df
1691
+
1692
+ @property
1693
+ def location(self) -> tuple[float, float] | None:
1694
+ # for FLOODPLAIN units, source priority is as follows:
1695
+ # 1. GXY location if defined
1696
+ # 2. median location if not (0,0)
1697
+ # 3. None
1698
+ if self._location is not None:
1699
+ return self._location
1700
+
1701
+ try:
1702
+ return _get_median_coordinate(self.data)
1703
+ except (ValueError, IndexError):
1704
+ return None
1705
+
1706
+ @location.setter
1707
+ def location(self, new_value: tuple[float, float] | None) -> None:
1708
+ msg = "Currently unit location is read-only."
1709
+ raise NotImplementedError(msg)
@@ -29,11 +29,11 @@ class UNSUPPORTED(Unit):
29
29
  self.comment = self._remove_unit_name(block[0])
30
30
 
31
31
  if self._subtype is False:
32
- self.labels = split_n_char(f"{block[1]:<{2*self._label_len}}", self._label_len)
32
+ self.labels = split_n_char(f"{block[1]:<{2 * self._label_len}}", self._label_len)
33
33
 
34
34
  else:
35
35
  self._subtype = self._get_first_word(block[1])
36
- self.labels = split_n_char(f"{block[2]:<{2*self._label_len}}", self._label_len)
36
+ self.labels = split_n_char(f"{block[2]:<{2 * self._label_len}}", self._label_len)
37
37
 
38
38
  if self.labels[1] != "":
39
39
  self.ds_label = self.labels[1]
@@ -46,12 +46,12 @@ def _validate_unit(unit, urban=False):
46
46
 
47
47
  def _validate_parameter(param, value): # noqa: C901, PLR0911, PLR0912
48
48
  if param["type"] == "type-match":
49
- return isinstance(value, param["options"]), f'-> Expected: {param["options"]}'
49
+ return isinstance(value, param["options"]), f"-> Expected: {param['options']}"
50
50
 
51
51
  if param["type"] == "value-match":
52
52
  if isinstance(value, str):
53
- return value.upper() in param["options"], f'-> Expected: {param["options"]}'
54
- return value in param["options"], f'-> Expected: {param["options"]}'
53
+ return value.upper() in param["options"], f"-> Expected: {param['options']}"
54
+ return value in param["options"], f"-> Expected: {param['options']}"
55
55
 
56
56
  if param["type"] == "end-value-match":
57
57
  if value.strip().upper().endswith(tuple(param["options"])):
@@ -70,7 +70,7 @@ def _validate_parameter(param, value): # noqa: C901, PLR0911, PLR0912
70
70
 
71
71
  return (
72
72
  type_match_result or value_match_result,
73
- f'-> Expected: Type {param["options"][0]} or Value {param["options"][1]}',
73
+ f"-> Expected: Type {param['options'][0]} or Value {param['options'][1]}",
74
74
  )
75
75
 
76
76
  if param["type"] == "value-range":
@@ -84,13 +84,13 @@ def _validate_parameter(param, value): # noqa: C901, PLR0911, PLR0912
84
84
  if param["type"] == "string-length":
85
85
  return (
86
86
  len(value) <= param["max_length"],
87
- f'-> Exceeds {param["max_length"]} characters',
87
+ f"-> Exceeds {param['max_length']} characters",
88
88
  )
89
89
 
90
90
  if param["type"] == "list-string-length":
91
91
  return (
92
92
  all(len(item) <= param["max_length"] for item in value),
93
- f'-> Contains labels exceeding {param["max_length"]} characters',
93
+ f"-> Contains labels exceeding {param['max_length']} characters",
94
94
  )
95
95
 
96
96
  if param["type"] == "dict-match":
@@ -1 +1 @@
1
- __version__ = "0.5.3.post2"
1
+ __version__ = "0.5.5.post1"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: floodmodeller_api
3
- Version: 0.5.3.post2
3
+ Version: 0.5.5.post1
4
4
  Summary: Extends the functionality of Flood Modeller to python users
5
5
  Author: Jacobs
6
6
  Author-email: joe.pierce@jacobs.com