floodmodeller-api 0.5.3.post1__py3-none-any.whl → 0.5.5__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (38)
  1. floodmodeller_api/dat.py +140 -53
  2. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +1 -2
  3. floodmodeller_api/ied.py +1 -1
  4. floodmodeller_api/test/test_dat.py +89 -2
  5. floodmodeller_api/test/test_data/All Units 4_6.DAT +0 -2
  6. floodmodeller_api/test/test_data/All Units 4_6.feb +0 -2
  7. floodmodeller_api/test/test_data/River_Bridge.dat +1453 -0
  8. floodmodeller_api/test/test_data/River_Bridge.gxy +221 -0
  9. floodmodeller_api/test/test_data/River_Bridge_DAT_expected.json +27273 -0
  10. floodmodeller_api/test/test_data/River_Bridge_no_gxy.dat +1453 -0
  11. floodmodeller_api/test/test_data/River_Bridge_no_gxy_DAT_expected.json +26853 -0
  12. floodmodeller_api/test/test_data/duplicate_unit_test.dat +18 -0
  13. floodmodeller_api/test/test_data/duplicate_unit_test_unsupported.dat +28 -0
  14. floodmodeller_api/test/test_data/encoding_test_cp1252.dat +0 -2
  15. floodmodeller_api/test/test_data/encoding_test_utf8.dat +0 -2
  16. floodmodeller_api/test/test_data/remove_dummy_test.dat +19 -0
  17. floodmodeller_api/test/test_gxy.py +98 -0
  18. floodmodeller_api/test/test_json.py +40 -2
  19. floodmodeller_api/test/test_read_file.py +3 -0
  20. floodmodeller_api/test/test_unit.py +12 -0
  21. floodmodeller_api/to_from_json.py +16 -2
  22. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +8 -8
  23. floodmodeller_api/units/_base.py +30 -0
  24. floodmodeller_api/units/boundaries.py +4 -1
  25. floodmodeller_api/units/conduits.py +1 -1
  26. floodmodeller_api/units/losses.py +2 -2
  27. floodmodeller_api/units/sections.py +36 -0
  28. floodmodeller_api/units/structures.py +60 -13
  29. floodmodeller_api/units/units.py +1 -0
  30. floodmodeller_api/units/unsupported.py +2 -2
  31. floodmodeller_api/validation/validation.py +6 -6
  32. floodmodeller_api/version.py +1 -1
  33. {floodmodeller_api-0.5.3.post1.dist-info → floodmodeller_api-0.5.5.dist-info}/METADATA +1 -1
  34. {floodmodeller_api-0.5.3.post1.dist-info → floodmodeller_api-0.5.5.dist-info}/RECORD +38 -29
  35. {floodmodeller_api-0.5.3.post1.dist-info → floodmodeller_api-0.5.5.dist-info}/WHEEL +0 -0
  36. {floodmodeller_api-0.5.3.post1.dist-info → floodmodeller_api-0.5.5.dist-info}/entry_points.txt +0 -0
  37. {floodmodeller_api-0.5.3.post1.dist-info → floodmodeller_api-0.5.5.dist-info}/licenses/LICENSE.txt +0 -0
  38. {floodmodeller_api-0.5.3.post1.dist-info → floodmodeller_api-0.5.5.dist-info}/top_level.txt +0 -0
floodmodeller_api/test/test_data/duplicate_unit_test.dat
@@ -0,0 +1,18 @@
+ duplicate units for test
+ #REVISION#1
+ 1 0.750 0.900 0.100 0.001 12
+ 10.000 0.010 0.010 0.700 0.100 0.700 0.000
+ RAD FILE
+
+ END GENERAL
+ QTBDY
+ qtboundary
+ 1 0.000 0.000 seconds EXTEND LINEAR 1.000
+ 20.000 0.000
+ QTBDY
+ qtboundary
+ 1 0.000 0.000 seconds EXTEND LINEAR 1.000
+ 20.000 0.000
+ GISINFO
+ QTBDY 0 478 986 0 0 1
+ 0 0 0 0 0 0
floodmodeller_api/test/test_data/duplicate_unit_test_unsupported.dat
@@ -0,0 +1,28 @@
+ duplicate units for test
+ #REVISION#1
+ 2 0.750 0.900 0.100 0.001 12
+ 10.000 0.010 0.010 0.700 0.100 0.700 0.000
+ RAD FILE
+
+ END GENERAL
+ APITESTDUMMY Dummy unnsupported unit for testing purposes
+ LBL001 LBL002
+ arbitrary data
+ table01234
+ -0.500 0.000 0.000 0.000091000000.0
+ 0.000 1.000 1.000 0.0000 910000000
+ 1.000 2.000 2.000 0.000091000000.0
+ 2.000 3.000 3.000 0.000091000000.0
+ 5.000 3.000 3.000 0.000091000000.0
+ APITESTDUMMY Dummy unnsupported unit for testing purposes
+ LBL001 LBL002
+ arbitrary data
+ table01234
+ -0.500 0.000 0.000 0.000091000000.0
+ 0.000 1.000 1.000 0.0000 910000000
+ 1.000 2.000 2.000 0.000091000000.0
+ 2.000 3.000 3.000 0.000091000000.0
+ 5.000 3.000 3.000 0.000091000000.0
+ GISINFO
+ APITESTDUMMY 0 478 986 0 0 1
+ 0 0 0 0 0 0
floodmodeller_api/test/test_data/encoding_test_cp1252.dat
@@ -715,8 +715,6 @@ YARNELL
  4.000 5.000 6.000 5.000
  GERRBDY
  Gerry46
- GERRBDY
- Gerry46
  #COMMENT

  ##CATCHMENT DETAILS
floodmodeller_api/test/test_data/encoding_test_utf8.dat
@@ -715,8 +715,6 @@ YARNELL
  4.000 5.000 6.000 5.000
  GERRBDY
  Gerry46
- GERRBDY
- Gerry46
  #COMMENT

  ##CATCHMENT DETAILS
floodmodeller_api/test/test_data/remove_dummy_test.dat
@@ -0,0 +1,19 @@
+
+ #REVISION#1
+ 1 0.750 0.900 0.100 0.001 12SI
+ 10.000 0.010 0.010 0.700 0.100 0.700 0.000
+ RAD FILE
+
+ END GENERAL
+ APITESTDUMMY Dummy unnsupported unit for testing purposes
+ LBL001 LBL002
+ arbitrary data
+ table01234
+ -0.500 0.000 0.000 0.000091000000.0
+ 0.000 1.000 1.000 0.0000 910000000
+ 1.000 2.000 2.000 0.000091000000.0
+ 2.000 3.000 3.000 0.000091000000.0
+ 5.000 3.000 3.000 0.000091000000.0
+ INITIAL CONDITIONS
+ label ? flow stage froude no velocity umode ustate z
+ LBL001 y 0.000 0.000 0.000 0.000 0.000 0.000 0.000
floodmodeller_api/test/test_gxy.py
@@ -0,0 +1,98 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ import pytest
+
+ import floodmodeller_api.units
+ from floodmodeller_api import DAT
+ from floodmodeller_api.units import (
+     FLOODPLAIN,
+     INTERPOLATE,
+     JUNCTION,
+     QTBDY,
+     REPLICATE,
+     RESERVOIR,
+     RIVER,
+     SPILL,
+ )
+
+
+ # this would be a fixture but doesnt work when used in parameterised test.
+ def blank_with_location(unit_class, *args, **kwargs):
+     unit = unit_class(*args, **kwargs)
+     unit._location = (461193.10, 339088.74)
+     return unit
+
+
+ @pytest.mark.parametrize(
+     ("unit", "expected_outcome"),
+     [
+         (RIVER(), None),
+         (QTBDY(), None),
+         (INTERPOLATE(), None),
+         (INTERPOLATE(easting=123.4, northing=987.6), (123.4, 987.6)),
+         (REPLICATE(), None),
+         (REPLICATE(easting=123.4, northing=987.6), (123.4, 987.6)),
+         (RESERVOIR(), None),
+         (RESERVOIR(easting=123.4, northing=987.6), (123.4, 987.6)),
+         (SPILL(), None),
+         (FLOODPLAIN(), None),
+         (blank_with_location(QTBDY), (461193.10, 339088.74)),
+         (blank_with_location(RIVER), (461193.10, 339088.74)),
+         (blank_with_location(INTERPOLATE), (461193.10, 339088.74)),
+         (blank_with_location(INTERPOLATE, easting=123.4, northing=987.6), (461193.10, 339088.74)),
+         (blank_with_location(SPILL), (461193.10, 339088.74)),
+         (blank_with_location(FLOODPLAIN), (461193.10, 339088.74)),
+     ],
+ )
+ def test_unit_location(unit, expected_outcome):
+     assert unit.location == expected_outcome
+
+
+ def get_supported_unit_classes():
+     all_unit_classes = []
+     for unit_type, attributes in floodmodeller_api.units.SUPPORTED_UNIT_TYPES.items():
+         if attributes["group"] not in ("other", "comments"):
+             unit_type_safe = unit_type.replace(" ", "_").replace("-", "_")
+             # Borrowed replacing idea from .dat
+             unit_class = getattr(floodmodeller_api.units, unit_type_safe)
+             all_unit_classes.append(unit_class)
+     return all_unit_classes
+
+
+ SUPPORTED_UNIT_CLASSES = get_supported_unit_classes()
+
+
+ @pytest.mark.parametrize("unit_class", SUPPORTED_UNIT_CLASSES)
+ def test_setting_location(unit_class):
+     # first check that we get the not implemented error, then check that the location is still unaffected.
+     # this test should be updated when location is read/write capable.
+     try:
+         # Junction units cannot be created from blank without at least one label.
+         unit = unit_class(labels=["label1"]) if unit_class == JUNCTION else unit_class()
+     except NotImplementedError as error:
+         pytest.skip(f"Creating unit {unit_class=} from blank not supported, skipping...\n{error=}")
+
+     with pytest.raises(NotImplementedError):
+         unit.location = (461382.54, 339188.26)
+
+     assert unit.location is None
+     assert unit._location is None
+
+
+ @pytest.mark.parametrize(
+     ("dat_name", "group", "label", "expected_outcome"),
+     [
+         ("EX1.DAT", "sections", "S4", (-38203.94169253, 153846.153846154)),
+         ("River_Bridge_no_gxy.dat", "sections", "M029", (385029.200, 242717.100)),
+         ("River_Bridge_no_gxy.dat", "sections", "M030", (384689.300, 242345.700)),
+         ("River_Bridge_no_gxy.dat", "sections", "M031", (384545.000, 241937.000)),
+         ("River_Bridge_no_gxy.dat", "structures", "M047spU", (386710.9, 236857.85)),
+     ],
+ )
+ def test_unit_from_dat(test_workspace, dat_name, group, label, expected_outcome):
+     dat_path = Path(test_workspace, dat_name)
+     dat = DAT(dat_path)
+     unit = getattr(dat, group)[label]
+     assert unit.location == expected_outcome
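
A minimal sketch of the behaviour these tests pin down, assuming a working directory containing River_Bridge_no_gxy.dat (as in test_unit_from_dat above); unit.location returns an (easting, northing) tuple or None:

    from floodmodeller_api import DAT

    dat = DAT("River_Bridge_no_gxy.dat")
    print(dat.sections["M029"].location)  # (385029.2, 242717.1)

    # location is read-only in 0.5.5: assigning to it raises NotImplementedError
    try:
        dat.sections["M029"].location = (461382.54, 339188.26)
    except NotImplementedError:
        pass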
floodmodeller_api/test/test_json.py
@@ -8,6 +8,13 @@ import pytest

  from floodmodeller_api import DAT, IED, IEF, INP, XML2D
  from floodmodeller_api.to_from_json import is_jsonable
+ from floodmodeller_api.units import (
+     FLOODPLAIN,
+     INTERPOLATE,
+     QTBDY,
+     RIVER,
+     SPILL,
+ )
  from floodmodeller_api.util import read_file

  if TYPE_CHECKING:
@@ -65,6 +72,14 @@ def parameterised_objs_and_expected(test_workspace) -> list[tuple[FMFile, Path]]
          (IEF(test_workspace / "ex3.ief"), test_workspace / "EX3_IEF_expected.json"),
          (XML2D(test_workspace / "Domain1_Q.xml"), test_workspace / "Domain1_Q_xml_expected.json"),
          (XML2D(test_workspace / "Linked1D2D.xml"), test_workspace / "Linked1D2D_xml_expected.json"),
+         (
+             DAT(test_workspace / "River_Bridge.dat"),
+             test_workspace / "River_Bridge_DAT_expected.json",
+         ),
+         (
+             DAT(test_workspace / "River_Bridge_no_gxy.dat"),
+             test_workspace / "River_Bridge_no_gxy_DAT_expected.json",
+         ),
      ]


@@ -80,7 +95,7 @@ def test_to_json_matches_expected(parameterised_objs_and_expected: list[tuple[FM
          json_dict_from_file = json.load(file)["Object Attributes"]

      # keys to ignore when testing for equivalence
-     keys_to_remove = ["_filepath", "file", "_log_path"]
+     keys_to_remove = ["_filepath", "file", "_log_path", "_gxy_filepath"]
      for key in keys_to_remove:
          json_dict_from_obj.pop(key, None)
          json_dict_from_file.pop(key, None)
@@ -104,8 +119,31 @@ def test_obj_reproduces_from_json_for_all_test_api_files(
      file_extension_glob,
  ):
      """JSON: To test the from_json function, It should produce the same dat file from a json file"""
+     fail_list = []
      for file in Path(test_workspace).glob(file_extension_glob):
-         assert api_class(file) == api_class.from_json(api_class(file).to_json())
+         if file.name.startswith("duplicate_unit_test"):
+             # Skipping as invalid DAT (duplicate units)
+             continue
+
+         if api_class(file) != api_class.from_json(api_class(file).to_json()):
+             fail_list.append(str(file))
+     failures = "\n".join(fail_list)
+     assert len(fail_list) == 0, f"The following files did not reproduce:\n{failures}"
+
+
+ @pytest.mark.parametrize(
+     "unit",
+     [
+         RIVER(),
+         QTBDY(),
+         INTERPOLATE(),
+         INTERPOLATE(easting=123.4, northing=987.6),
+         SPILL(),
+         FLOODPLAIN(),
+     ],
+ )
+ def test_obj_reproduces_from_json_for_units(unit):
+     assert unit == unit.from_json(unit.to_json())


  def test_is_jsonable_with_jsonable_object():
floodmodeller_api/test/test_read_file.py
@@ -6,6 +6,9 @@ from floodmodeller_api.util import FloodModellerAPIError

  def test_read_file(test_workspace):
      for file in test_workspace.glob("*"):
+         if file.name.startswith("duplicate_unit_test"):
+             # Skipping as invalid DAT (duplicate units)
+             continue
          if (
              file.suffix.lower()
              in [
floodmodeller_api/test/test_unit.py
@@ -1,7 +1,9 @@
  from __future__ import annotations

+ import pandas as pd
  import pytest

+ from floodmodeller_api.units import QTBDY
  from floodmodeller_api.units._base import Unit # update this import path to match your repo


@@ -55,3 +57,13 @@ def test_remove_unit_name(unit: str, header: str, remove_revision: bool, expecte
      dummy_unit = DummyUnit(unit)
      result = dummy_unit._remove_unit_name(header, remove_revision=remove_revision)
      assert result == expected_result
+
+
+ def test_partially_defined_unit():
+     actual = QTBDY(["QTBDY comment", "test", "1", "0"]).data
+     expected = pd.Series(
+         [0],
+         index=pd.Index([0], name="Time"),
+         name="Flow",
+     )
+     pd.testing.assert_series_equal(expected, actual)
floodmodeller_api/to_from_json.py
@@ -82,6 +82,11 @@ def recursive_to_json(obj: Any, is_top_level: bool = True) -> Any:  # noqa: PLR0
      from .units._base import Unit
      from .urban1d._base import UrbanSubsection, UrbanUnit

+     if isinstance(obj, tuple):
+         return {
+             "python_tuple": [recursive_to_json(item, is_top_level=False) for item in obj],
+         }
+
      if is_jsonable(obj):
          return obj

@@ -144,7 +149,7 @@ def from_json(obj: str | dict) -> dict:
      return recursive_from_json(obj_dict)


- def recursive_from_json(obj: dict | Any) -> Any:
+ def recursive_from_json(obj: dict | Any) -> Any:  # noqa: C901
      """
      Function to undertake a recursion through the different elements of the JSON object

@@ -173,7 +178,16 @@ def recursive_from_json(obj: dict | Any) -> Any:
          return reconstructed_sr

      if "python_set" in obj:
-         return set(obj["python_set"])
+         return {
+             recursive_from_json(item) if isinstance(item, dict) else item
+             for item in obj["python_set"]
+         }
+
+     if "python_tuple" in obj:
+         return tuple(
+             recursive_from_json(item) if isinstance(item, dict) else item
+             for item in obj["python_tuple"]
+         )

      for key, value in obj.items():
          if isinstance(value, dict):
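
These two changes let tuples (such as the new location coordinates) survive a JSON round trip: json.dumps coerces tuples to lists, so recursive_to_json boxes them in a {"python_tuple": [...]} marker dict that recursive_from_json unboxes. A self-contained sketch of the same scheme, with encode/decode as hypothetical stand-ins for the two functions:

    import json

    def encode(obj):
        # box tuples so they can be told apart from plain lists after json.loads
        if isinstance(obj, tuple):
            return {"python_tuple": [encode(item) for item in obj]}
        return obj

    def decode(obj):
        # unbox the marker dict back into a tuple, recursing into nested dicts
        if isinstance(obj, dict) and "python_tuple" in obj:
            return tuple(
                decode(item) if isinstance(item, dict) else item
                for item in obj["python_tuple"]
            )
        return obj

    location = (461193.10, 339088.74)
    restored = decode(json.loads(json.dumps(encode(location))))
    assert restored == location and isinstance(restored, tuple)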
floodmodeller_api/toolbox/model_build/structure_log/structure_log.py
@@ -437,7 +437,7 @@ class StructureLogBuilder:
              height = opening["opening_height"]
              width = opening["width"]

-             text += f"Opening {n+1}: h: {height:.2f} x w: {width:.2f} "
+             text += f"Opening {n + 1}: h: {height:.2f} x w: {width:.2f} "

          return text.rstrip()

@@ -480,24 +480,24 @@ class StructureLogBuilder:
          culvert_loss = ""
          match unit_dict["subtype"]:
              case "CIRCULAR":
-                 text += f'dia: {unit_dict["dimensions"]["diameter"]:.2f} x l: {unit_dict["conduit_data"]["length"]:.2f}'
+                 text += f"dia: {unit_dict['dimensions']['diameter']:.2f} x l: {unit_dict['conduit_data']['length']:.2f}"
              case "SPRUNGARCH" | "SPRUNG":
-                 text += f'(Springing: {unit_dict["dimensions"]["height_springing"]:.2f}, Crown: {unit_dict["dimensions"]["height_crown"]:.2f}) x w: {unit_dict["dimensions"]["width"]:.2f} x l: {unit_dict["conduit_data"]["length"]:.2f}'
+                 text += f"(Springing: {unit_dict['dimensions']['height_springing']:.2f}, Crown: {unit_dict['dimensions']['height_crown']:.2f}) x w: {unit_dict['dimensions']['width']:.2f} x l: {unit_dict['conduit_data']['length']:.2f}"
              case "RECTANGULAR":
-                 text += f'h: {unit_dict["dimensions"]["height"]:.2f} x w: {unit_dict["dimensions"]["width"]:.2f} x l: {unit_dict["conduit_data"]["length"]:.2f}'
+                 text += f"h: {unit_dict['dimensions']['height']:.2f} x w: {unit_dict['dimensions']['width']:.2f} x l: {unit_dict['conduit_data']['length']:.2f}"
              case "SECTION":
-                 text += f'h: {unit_dict["dimensions"]["height"]:.2f} x w: {unit_dict["dimensions"]["width"]:.2f} x l: {unit_dict["conduit_data"]["length"]:.2f}'
+                 text += f"h: {unit_dict['dimensions']['height']:.2f} x w: {unit_dict['dimensions']['width']:.2f} x l: {unit_dict['conduit_data']['length']:.2f}"
              case _:
                  return "", ""

          if "total_length" in unit_dict["conduit_data"]:
-             text += f' (Total conduit length: {unit_dict["conduit_data"]["total_length"]:.2f})'
+             text += f" (Total conduit length: {unit_dict['conduit_data']['total_length']:.2f})"

          if "inlet" in unit_dict["conduit_data"]:
-             culvert_loss += f'Ki: {unit_dict["conduit_data"]["inlet"]}, '
+             culvert_loss += f"Ki: {unit_dict['conduit_data']['inlet']}, "

          if "outlet" in unit_dict["conduit_data"]:
-             culvert_loss += f'Ko: {unit_dict["conduit_data"]["outlet"]}, '
+             culvert_loss += f"Ko: {unit_dict['conduit_data']['outlet']}, "

          culvert_loss = culvert_loss.rstrip(", ")
floodmodeller_api/units/_base.py
@@ -35,6 +35,7 @@ class Unit(Jsonable):
      _unit: str
      _subtype: str | None = None
      _name: str | None = None
+     _location: tuple[float, float] | None = None

      def __init__(self, unit_block=None, n=12, from_json: bool = False, **kwargs):
          if from_json:
@@ -104,6 +105,35 @@ class Unit(Jsonable):
          msg = "You cannot change the subtype of a unit once it has been instantiated"
          raise ValueError(msg)

+     @property
+     def location(self) -> tuple[float, float] | None:
+         # gxy data (_location) written upon instantiation when opening DAT.
+         # default priority is as follows:
+         # 1. gxy data if not None
+         # 2. easting and northing attributes if the unit has them (Interpolates, replicates and reservoirs)
+         # 3. None
+         if self._location is not None:
+             return self._location
+
+         if hasattr(self, "easting") and hasattr(self, "northing"):
+             location = (self.easting, self.northing)
+             if location != (0, 0):
+                 return location
+
+         return None
+
+     @location.setter
+     def location(self, new_value):
+         msg = "Currently unit location is read-only."
+         raise NotImplementedError(msg)
+
+     def set_cached_location_from_gxy(self, location):
+         """Used by DAT to set the _location attribute when reading a gxy.
+
+         This is not a setter, and will not make .gxy modifications.
+         """
+         self._location = location
+
      def __repr__(self):
          if self._subtype is None:
              return f"<floodmodeller_api Unit Class: {self._unit}(name={self._name})>"
floodmodeller_api/units/boundaries.py
@@ -106,7 +106,8 @@ class QTBDY(Unit):
              if self.timeunit == "DATES"
              else to_data_list(qtbdy_block[3:])
          )
-
+         if data_list == [[0]]:
+             data_list = [[0, 0]]
          self.data = pd.DataFrame(data_list, columns=["Flow", "Time"])
          self.data = self.data.set_index("Time")
          self.data = self.data["Flow"] # Convert to series
@@ -200,6 +201,8 @@ class HTBDY(Unit):
              if self.timeunit == "DATES"
              else to_data_list(htbdy_block[3:])
          )
+         if data_list == [[0]]:
+             data_list = [[0, 0]]

          self.data = pd.DataFrame(data_list, columns=["Stage", "Time"])
          self.data = self.data.set_index("Time")
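
Both guards handle a partially defined boundary: a block giving a single value with no time pair parses to [[0]], which cannot fill the two-column Flow/Time (or Stage/Time) frame; padding it to [[0, 0]] yields a one-row series at time zero. This mirrors test_partially_defined_unit in test_unit.py:

    import pandas as pd
    from floodmodeller_api.units import QTBDY

    unit = QTBDY(["QTBDY comment", "test", "1", "0"])  # one value, no time pair
    expected = pd.Series([0], index=pd.Index([0], name="Time"), name="Flow")
    pd.testing.assert_series_equal(expected, unit.data)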
floodmodeller_api/units/conduits.py
@@ -183,7 +183,7 @@ class CONDUIT(Unit):
          """Function to read a given CONDUIT block and store data as class attributes"""
          self._subtype = self._get_first_word(c_block[1])
          # Extends label line to be correct length before splitting to pick up blank labels
-         labels = split_n_char(f"{c_block[2]:<{2*self._label_len}}", self._label_len)
+         labels = split_n_char(f"{c_block[2]:<{2 * self._label_len}}", self._label_len)
          self.name = labels[0]
          self.spill = labels[1]
          self.comment = self._remove_unit_name(c_block[0])
floodmodeller_api/units/losses.py
@@ -79,7 +79,7 @@ class CULVERT(Unit):
          # Extract common attributes
          self._subtype = self._get_first_word(block[1])
          self.comment = self._remove_unit_name(block[0])
-         labels = split_n_char(f"{block[2]:<{4*self._label_len}}", self._label_len)
+         labels = split_n_char(f"{block[2]:<{4 * self._label_len}}", self._label_len)
          self.name = labels[0]
          self.ds_label = labels[1]
          self.us_remote_label = labels[2]
@@ -195,7 +195,7 @@ class BLOCKAGE(Unit):
          self._revision, self.comment = self._get_revision_and_comment(block[0])

          # Extract labels
-         labels = split_n_char(f"{block[1]:<{5*self._label_len}}", self._label_len)
+         labels = split_n_char(f"{block[1]:<{5 * self._label_len}}", self._label_len)
          self.name = labels[0]
          self.ds_label = labels[1]
          self.us_reference_label = labels[2]
floodmodeller_api/units/sections.py
@@ -235,6 +235,42 @@ class RIVER(Unit):

          return self._raw_block

+     @property
+     def location(self) -> tuple[float, float] | None:
+         # for RIVER units, source priority is as follows:
+         # 1. GXY location if defined
+         # 2. BED marker location if not (0,0)
+         # 3. Y-min location if not (0,0)
+         # 4. None
+         if self._location is not None:
+             return self._location
+
+         try:
+             bed_rows = self.active_data["Marker"] == "BED"
+             bed_points = self.active_data.loc[bed_rows]
+             first_bed = bed_points[["Easting", "Northing"]].iloc[0]
+             location = (float(first_bed["Easting"]), float(first_bed["Northing"]))
+             if location != (0, 0):
+                 return location
+         except (ValueError, IndexError):
+             pass
+
+         try:
+             min_idx = self.active_data.Y.idxmin()
+             min_row = self.active_data.loc[min_idx]
+             location = (float(min_row["Easting"]), float(min_row["Northing"]))
+             if location != (0, 0):
+                 return location
+         except (ValueError, IndexError):
+             pass
+
+         return None
+
+     @location.setter
+     def location(self, new_value: tuple[float, float] | None) -> None:
+         msg = "Currently unit location is read-only."
+         raise NotImplementedError(msg)
+
      @property
      def data(self) -> pd.DataFrame:
          """Data table for the river cross section.