floodmodeller-api 0.5.0.post1__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. floodmodeller_api/__init__.py +11 -1
  2. floodmodeller_api/_base.py +55 -36
  3. floodmodeller_api/backup.py +15 -12
  4. floodmodeller_api/dat.py +191 -121
  5. floodmodeller_api/diff.py +4 -4
  6. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +15 -14
  7. floodmodeller_api/ied.py +8 -10
  8. floodmodeller_api/ief.py +56 -42
  9. floodmodeller_api/ief_flags.py +1 -1
  10. floodmodeller_api/inp.py +7 -10
  11. floodmodeller_api/logs/lf.py +25 -26
  12. floodmodeller_api/logs/lf_helpers.py +20 -20
  13. floodmodeller_api/logs/lf_params.py +1 -5
  14. floodmodeller_api/mapping.py +11 -2
  15. floodmodeller_api/test/__init__.py +2 -2
  16. floodmodeller_api/test/conftest.py +2 -3
  17. floodmodeller_api/test/test_backup.py +2 -2
  18. floodmodeller_api/test/test_conveyance.py +13 -7
  19. floodmodeller_api/test/test_dat.py +168 -20
  20. floodmodeller_api/test/test_data/EX18_DAT_expected.json +164 -144
  21. floodmodeller_api/test/test_data/EX3_DAT_expected.json +6 -2
  22. floodmodeller_api/test/test_data/EX6_DAT_expected.json +12 -46
  23. floodmodeller_api/test/test_data/encoding_test_cp1252.dat +1081 -0
  24. floodmodeller_api/test/test_data/encoding_test_utf8.dat +1081 -0
  25. floodmodeller_api/test/test_data/integrated_bridge/AR_NoSP_NoBl_2O_NO_OneFRC.ied +33 -0
  26. floodmodeller_api/test/test_data/integrated_bridge/AR_vSP_25pc_1O.ied +32 -0
  27. floodmodeller_api/test/test_data/integrated_bridge/PL_vSP_25pc_1O.ied +34 -0
  28. floodmodeller_api/test/test_data/integrated_bridge/SBTwoFRCsStaggered.IED +32 -0
  29. floodmodeller_api/test/test_data/integrated_bridge/US_NoSP_NoBl_OR_RN.ied +28 -0
  30. floodmodeller_api/test/test_data/integrated_bridge/US_SP_NoBl_OR_frc_PT2-5_RN.ied +34 -0
  31. floodmodeller_api/test/test_data/integrated_bridge/US_fSP_NoBl_1O.ied +30 -0
  32. floodmodeller_api/test/test_data/integrated_bridge/US_nSP_NoBl_1O.ied +49 -0
  33. floodmodeller_api/test/test_data/integrated_bridge/US_vSP_NoBl_2O_Para.ied +35 -0
  34. floodmodeller_api/test/test_data/integrated_bridge.dat +40 -0
  35. floodmodeller_api/test/test_data/network.ied +2 -2
  36. floodmodeller_api/test/test_data/network_dat_expected.json +141 -243
  37. floodmodeller_api/test/test_data/network_ied_expected.json +2 -2
  38. floodmodeller_api/test/test_data/network_with_comments.ied +2 -2
  39. floodmodeller_api/test/test_data/structure_logs/EX17_expected.csv +4 -0
  40. floodmodeller_api/test/test_data/structure_logs/EX17_expected.json +69 -0
  41. floodmodeller_api/test/test_data/structure_logs/EX18_expected.csv +20 -0
  42. floodmodeller_api/test/test_data/structure_logs/EX18_expected.json +292 -0
  43. floodmodeller_api/test/test_data/structure_logs/EX6_expected.csv +4 -0
  44. floodmodeller_api/test/test_data/structure_logs/EX6_expected.json +35 -0
  45. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_flow.csv +182 -0
  46. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_fr.csv +182 -0
  47. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_mode.csv +182 -0
  48. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_stage.csv +182 -0
  49. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_state.csv +182 -0
  50. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_velocity.csv +182 -0
  51. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_h.csv +182 -0
  52. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_mode.csv +182 -0
  53. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_link_inflow.csv +182 -0
  54. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_max.csv +87 -0
  55. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_h.csv +182 -0
  56. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_mode.csv +182 -0
  57. floodmodeller_api/test/test_flowtimeprofile.py +2 -2
  58. floodmodeller_api/test/test_hydrology_plus_export.py +4 -2
  59. floodmodeller_api/test/test_ied.py +3 -3
  60. floodmodeller_api/test/test_ief.py +12 -4
  61. floodmodeller_api/test/test_inp.py +2 -2
  62. floodmodeller_api/test/test_integrated_bridge.py +159 -0
  63. floodmodeller_api/test/test_json.py +14 -13
  64. floodmodeller_api/test/test_logs_lf.py +50 -29
  65. floodmodeller_api/test/test_read_file.py +1 -0
  66. floodmodeller_api/test/test_river.py +12 -12
  67. floodmodeller_api/test/test_tool.py +8 -5
  68. floodmodeller_api/test/test_toolbox_structure_log.py +148 -158
  69. floodmodeller_api/test/test_xml2d.py +14 -16
  70. floodmodeller_api/test/test_zz.py +143 -0
  71. floodmodeller_api/to_from_json.py +9 -9
  72. floodmodeller_api/tool.py +15 -11
  73. floodmodeller_api/toolbox/example_tool.py +5 -1
  74. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +13 -9
  75. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +500 -194
  76. floodmodeller_api/toolbox/model_build/structure_log_definition.py +5 -1
  77. floodmodeller_api/units/__init__.py +15 -0
  78. floodmodeller_api/units/_base.py +87 -20
  79. floodmodeller_api/units/_helpers.py +343 -0
  80. floodmodeller_api/units/boundaries.py +59 -71
  81. floodmodeller_api/units/comment.py +1 -1
  82. floodmodeller_api/units/conduits.py +57 -54
  83. floodmodeller_api/units/connectors.py +112 -0
  84. floodmodeller_api/units/controls.py +107 -0
  85. floodmodeller_api/units/conveyance.py +1 -1
  86. floodmodeller_api/units/iic.py +2 -9
  87. floodmodeller_api/units/losses.py +44 -45
  88. floodmodeller_api/units/sections.py +52 -51
  89. floodmodeller_api/units/structures.py +361 -531
  90. floodmodeller_api/units/units.py +27 -26
  91. floodmodeller_api/units/unsupported.py +5 -7
  92. floodmodeller_api/units/variables.py +2 -2
  93. floodmodeller_api/urban1d/_base.py +13 -17
  94. floodmodeller_api/urban1d/conduits.py +11 -21
  95. floodmodeller_api/urban1d/general_parameters.py +1 -1
  96. floodmodeller_api/urban1d/junctions.py +7 -11
  97. floodmodeller_api/urban1d/losses.py +13 -17
  98. floodmodeller_api/urban1d/outfalls.py +18 -22
  99. floodmodeller_api/urban1d/raingauges.py +5 -10
  100. floodmodeller_api/urban1d/subsections.py +5 -4
  101. floodmodeller_api/urban1d/xsections.py +14 -17
  102. floodmodeller_api/util.py +23 -6
  103. floodmodeller_api/validation/parameters.py +7 -3
  104. floodmodeller_api/validation/urban_parameters.py +1 -4
  105. floodmodeller_api/validation/validation.py +11 -5
  106. floodmodeller_api/version.py +1 -1
  107. floodmodeller_api/xml2d.py +27 -31
  108. floodmodeller_api/xml2d_template.py +1 -1
  109. floodmodeller_api/zz.py +539 -0
  110. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/LICENSE.txt +1 -1
  111. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/METADATA +30 -16
  112. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/RECORD +116 -83
  113. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/WHEEL +1 -1
  114. floodmodeller_api/test/test_zzn.py +0 -36
  115. floodmodeller_api/units/helpers.py +0 -123
  116. floodmodeller_api/zzn.py +0 -414
  117. /floodmodeller_api/test/test_data/{network_from_tabularCSV.csv → tabular_csv_outputs/network_zzn_max.csv} +0 -0
  118. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/entry_points.txt +0 -0
  119. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/top_level.txt +0 -0
floodmodeller_api/units/conveyance.py
@@ -72,7 +72,7 @@ def calculate_cross_section_conveyance(
     total_length = np.where(in_panel_and_section, length, 0).sum(axis=1)
     total_mannings = np.where(in_panel_and_section, mannings, 0).sum(axis=1)

-    with np.errstate(invalid="ignore"):
+    with np.errstate(divide="ignore", invalid="ignore"):
         conveyance += np.where(
             total_length >= MINIMUM_PERIMETER_THRESHOLD,
             total_area ** (5 / 3) * total_length ** (1 / 3) / (total_mannings * rpl_panel),
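Note: np.where evaluates both branches eagerly, so panels that the threshold test later discards still go through the division; suppressing only "invalid" hides 0/0 but not x/0 warnings, hence the added "divide" key. A minimal sketch of the pattern with illustrative values (not the package's code):

    import numpy as np

    numerator = np.array([8.0, 0.0, 5.0])
    denominator = np.array([2.0, 0.0, 0.0])  # e.g. total_mannings * rpl_panel
    valid = denominator > 0

    with np.errstate(divide="ignore", invalid="ignore"):
        conveyance = np.where(valid, numerator / denominator, 0.0)

    print(conveyance)  # [4. 0. 0.] -- no RuntimeWarning from the masked 0/0 and 5/0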
floodmodeller_api/units/iic.py
@@ -1,6 +1,6 @@
 """
 Flood Modeller Python API
-Copyright (C) 2024 Jacobs U.K. Limited
+Copyright (C) 2025 Jacobs U.K. Limited

 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
 as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
@@ -18,7 +18,7 @@ import pandas as pd

 from ..diff import check_item_with_dataframe_equal
 from ..to_from_json import Jsonable
-from .helpers import join_10_char, split_10_char
+from ._helpers import join_10_char, split_10_char

 # Initial Conditions Class

@@ -65,15 +65,8 @@ class IIC(Jsonable):
                 float(z),
             ],
         )
-        # AL is this storing the values as strings?
         self.data = pd.DataFrame(data_list, columns=header)
-        # JP Yes
-        # AL If it does, would it worth making it store the values instead?
-        # JP Yes I'll do that, only downside is that the updated values may not match notation
-        # of original even if no changes. (i.e 2.0 -> 2.00 or 2. -> 2.00)

-        # AL Is this only to transform the table of data into a string-like array?
-        # JP Yes it just transforms the dataframe back into valid DAT format
     def _write(self):
         ic_block = [
             "INITIAL CONDITIONS",
floodmodeller_api/units/losses.py
@@ -1,6 +1,6 @@
 """
 Flood Modeller Python API
-Copyright (C) 2024 Jacobs U.K. Limited
+Copyright (C) 2025 Jacobs U.K. Limited

 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
 as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
@@ -14,20 +14,21 @@ If you have any query about this program or this License, please contact us at s
 address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
 """

+import logging
+
 import pandas as pd

 from floodmodeller_api.validation import _validate_unit

 from ._base import Unit
-from .helpers import (
-    _to_data_list,
-    _to_float,
-    _to_int,
-    _to_str,
+from ._helpers import (
     join_10_char,
     join_n_char_ljust,
     split_10_char,
     split_n_char,
+    to_data_list,
+    to_float,
+    to_str,
 )

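Note: together with the file list above (units/helpers.py removed, units/_helpers.py added), this hunk shows the conversion helpers losing their leading underscore (_to_float -> to_float, _to_str -> to_str, _to_data_list -> to_data_list). Downstream code importing the old private module will break on 0.5.2; a hedged compatibility sketch, illustrative only since these are private helpers:

    try:
        # 0.5.2 and later
        from floodmodeller_api.units._helpers import to_float
    except ImportError:
        # 0.5.0.post1 and earlier
        from floodmodeller_api.units.helpers import _to_float as to_float

    print(to_float("2.5", 0.0))  # 2.5 -- second argument appears to be the fallback default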
@@ -76,8 +77,8 @@ class CULVERT(Unit):
         """Function to read a given CULVERT block and store data as class attributes"""

         # Extract common attributes
-        self._subtype = block[1].split(" ")[0].strip()
-        self.comment = block[0].replace("CULVERT", "").strip()
+        self._subtype = self._get_first_word(block[1])
+        self.comment = self._remove_unit_name(block[0])
         labels = split_n_char(f"{block[2]:<{4*self._label_len}}", self._label_len)
         self.name = labels[0]
         self.ds_label = labels[1]
@@ -91,44 +92,43 @@ class CULVERT(Unit):

             # Read first set of general parameters
             params = split_10_char(f"{block[3]:<60}")
-            self.k = _to_float(params[0], 0.0)
-            self.m = _to_float(params[1], 0.0)
-            self.c = _to_float(params[2], 0.0)
-            self.y = _to_float(params[3], 0.0)
-            self.ki = _to_float(params[4], 0.0)
-            self.type_code = _to_str(params[5], "A")
+            self.k = to_float(params[0], 0.0)
+            self.m = to_float(params[1], 0.0)
+            self.c = to_float(params[2], 0.0)
+            self.y = to_float(params[3], 0.0)
+            self.ki = to_float(params[4], 0.0)
+            self.type_code = to_str(params[5], "A")

             # Read trash screen and remaining general parameters
             params1 = split_10_char(f"{block[4]:<70}")
-            self.screen_width = _to_float(params1[0], 0.0)
-            self.bar_proportion = _to_float(params1[1], 0.0)
-            self.debris_proportion = _to_float(params1[2], 0.0)
-            self.loss_coefficient = _to_float(params1[3], 0.0)
-            self.reverse_flow_mode = _to_str(params1[4], "CALCULATED", check_float=True)
-            self.headloss_type = _to_str(params1[5], "TOTAL")
-            self.max_screen_height = _to_float(params1[6], 0.0)
+            self.screen_width = to_float(params1[0], 0.0)
+            self.bar_proportion = to_float(params1[1], 0.0)
+            self.debris_proportion = to_float(params1[2], 0.0)
+            self.loss_coefficient = to_float(params1[3], 0.0)
+            self.reverse_flow_mode = to_str(params1[4], "CALCULATED", check_float=True)
+            self.headloss_type = to_str(params1[5], "TOTAL")
+            self.max_screen_height = to_float(params1[6], 0.0)

         elif self.subtype == "OUTLET":
             params = split_10_char(f"{block[3]:<30}")
-            self.loss_coefficient = _to_float(params[0], 1.0)
-            self.reverse_flow_mode = _to_str(params[1], "CALCULATED")
-            self.headloss_type = _to_str(params[2], "TOTAL")
+            self.loss_coefficient = to_float(params[0], 1.0)
+            self.reverse_flow_mode = to_str(params[1], "CALCULATED")
+            self.headloss_type = to_str(params[2], "TOTAL")

         else:
-            # This else block is triggered for culvert subtypes which aren't yet supported, and just keeps the '_block' in it's raw state to write back.
-            print(
-                f'This Culvert sub-type: "{self.subtype}" is currently unsupported for reading/editing',
+            # This else block is triggered for culvert subtypes which aren't yet supported, and just keeps the '_block' in its raw state to write back.
+            logging.warning(
+                "This Culvert sub-type: '%s' is currently unsupported for reading/editing",
+                self.subtype,
             )
             self._raw_block = block

-    # TODO: Create from blank. Not supported currently as CULVERT has multiple subtypes
-
     def _write(self):
         """Function to write a valid CULVERT block"""

         _validate_unit(self)

-        header = "CULVERT " + self.comment
+        header = self._create_header()
         labels = join_n_char_ljust(
             self._label_len,
             self.name,
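Note: the unsupported-subtype message now goes through logging.warning rather than print (the same change appears in sections.py below), so callers can route, reformat or silence it with standard logging configuration. A small illustrative sketch:

    import logging

    # Show the warnings with a timestamp and level...
    logging.basicConfig(level=logging.WARNING, format="%(asctime)s %(levelname)s %(message)s")

    # ...or silence everything at WARNING and below for a batch run:
    # logging.disable(logging.WARNING)

    logging.warning(
        "This Culvert sub-type: '%s' is currently unsupported for reading/editing",
        "SOME_SUBTYPE",  # placeholder value for illustration
    )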
@@ -192,9 +192,7 @@ class BLOCKAGE(Unit):
         """Function to read a given BLOCKAGE block and store data as class attributes"""

         # Extract comment and revision number
-        b = block[0].replace("BLOCKAGE #revision#", " ").strip()
-        self._revision = _to_int(b[0], 1)
-        self.comment = b[1:].strip()
+        self._revision, self.comment = self._get_revision_and_comment(block[0])

         # Extract labels
         labels = split_n_char(f"{block[1]:<{5*self._label_len}}", self._label_len)
@@ -206,25 +204,27 @@ class BLOCKAGE(Unit):

         # Extract inlet and outlet loss coefficients
         params = split_10_char(f"{block[2]:<20}")
-        self.inlet_loss = _to_float(params[0], 1.5)
-        self.outlet_loss = _to_float(params[1], 1.0)
+        self.inlet_loss = to_float(params[0], 1.5)
+        self.outlet_loss = to_float(params[1], 1.0)

         # Extract blockage timeseries parameters
         params1 = split_10_char(f"{block[3]:<40}")
         self.nrows = int(params1[0])
-        self.timeoffset = _to_float(params1[1])
+        self.timeoffset = to_float(params1[1])

-        self.timeunit = _to_str(params1[2], "HOURS", check_float=True)
+        self.timeunit = to_str(params1[2], "HOURS", check_float=True)
         if self.timeunit == "DATE":
-            self.timeunit = "DATES"  # Parameter value updated to 'DATES' for consistency with other unit types. 'DATE' and 'DATES' both accepted for blockage unit ONLY
+            self.timeunit = "DATES"
+            # Parameter value updated to 'DATES' for consistency with other unit types.
+            # 'DATE' and 'DATES' both accepted for blockage unit ONLY

-        self.extendmethod = _to_str(params1[3], "NOEXTEND")
+        self.extendmethod = to_str(params1[3], "NOEXTEND")

         # Extract blockage to timeseries
         data_list = (
-            _to_data_list(block[4:], num_cols=2, date_col=0)
+            to_data_list(block[4:], num_cols=2, date_col=0)
             if self.timeunit == "DATES"
-            else _to_data_list(block[4:], num_cols=2)
+            else to_data_list(block[4:], num_cols=2)
         )  # Enforced two columns as Flood Modeller saves old parameters when using DATES (also to avoid extra 'HOURS' bug)

         self.data = pd.DataFrame(data_list, columns=["Time", "Blockage"])
@@ -238,11 +238,10 @@ class BLOCKAGE(Unit):

         # Custom validation for blockage percentage
         if self.data.max() > 1 or self.data.min() < 0:
-            raise ValueError(
-                f"Parameter error with {repr(self)} - blockage percentage must be between 0 and 1",
-            )
+            msg = f"Parameter error with {self!r} - blockage percentage must be between 0 and 1"
+            raise ValueError(msg)

-        header = f"BLOCKAGE #revision#{self._revision} {self.comment}"
+        header = self._create_header(include_revision=True)
         labels = join_n_char_ljust(
             self._label_len,
             self.name,
floodmodeller_api/units/sections.py
@@ -1,6 +1,6 @@
 """
 Flood Modeller Python API
-Copyright (C) 2024 Jacobs U.K. Limited
+Copyright (C) 2025 Jacobs U.K. Limited

 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
 as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
@@ -14,20 +14,24 @@ If you have any query about this program or this License, please contact us at s
 address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
 """

+from __future__ import annotations
+
+import logging
+
 import pandas as pd

 from floodmodeller_api.validation import _validate_unit

 from ._base import Unit
-from .conveyance import calculate_cross_section_conveyance_cached
-from .helpers import (
-    _to_float,
-    _to_int,
+from ._helpers import (
     join_10_char,
     join_n_char_ljust,
     split_10_char,
     split_n_char,
+    to_float,
+    to_int,
 )
+from .conveyance import calculate_cross_section_conveyance_cached


 class RIVER(Unit):
@@ -54,7 +58,7 @@ class RIVER(Unit):
     """

     _unit = "RIVER"
-    _required_columns = [
+    _required_columns = (
         "X",
         "Y",
         "Mannings n",
@@ -65,7 +69,7 @@ class RIVER(Unit):
         "Northing",
         "Deactivation",
         "SP. Marker",
-    ]
+    )

     def _create_from_blank(  # noqa: PLR0913
         self,
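Note: switching the class-level _required_columns from a list to a tuple makes it immutable. A plausible rationale (an assumption, not stated in the diff) is that a mutable class attribute is shared by every instance, so an in-place edit on one object leaks into all the others; a generic sketch of that pitfall:

    class WithList:
        required = ["X", "Y"]

    class WithTuple:
        required = ("X", "Y")

    a, b = WithList(), WithList()
    a.required.append("Mannings n")  # mutates the shared class attribute
    print(b.required)                # ['X', 'Y', 'Mannings n'] -- leaked into b

    extended = WithTuple().required + ("Mannings n",)  # tuples force an explicit copy instead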
@@ -100,24 +104,18 @@ class RIVER(Unit):
         }.items():
             setattr(self, param, val)

-        self._data = (
-            data
-            if isinstance(data, pd.DataFrame)
-            else pd.DataFrame(
-                [],
-                columns=self._required_columns,
-            )
-        )
+        self._data = self._enforce_dataframe(data, self._required_columns)
         self._active_data = None

     def _read(self, riv_block):
         """Function to read a given RIVER block and store data as class attributes."""

         self._subtype = riv_block[1].split(" ")[0].strip()
+        # Extends label line to be correct length before splitting to pick up blank labels
+        labels = split_n_char(f"{riv_block[2]:<{7*self._label_len}}", self._label_len)
+
         # Only supporting 'SECTION' subtype for now
         if self.subtype == "SECTION":
-            # Extends label line to be correct length before splitting to pick up blank labels
-            labels = split_n_char(f"{riv_block[2]:<{7*self._label_len}}", self._label_len)
             self.name = labels[0]
             self.spill1 = labels[1]
             self.spill2 = labels[2]
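Note: the inline conditional is replaced by a _enforce_dataframe helper method whose implementation is not shown in this diff. Based on the code it replaces, it presumably behaves like the sketch below (hypothetical reimplementation, names are illustrative):

    import pandas as pd

    def enforce_dataframe(data, required_columns) -> pd.DataFrame:
        # Pass DataFrames through untouched; anything else becomes an
        # empty frame carrying the required column set.
        if isinstance(data, pd.DataFrame):
            return data
        return pd.DataFrame([], columns=list(required_columns))

    print(enforce_dataframe(None, ("X", "Y", "Mannings n")).columns.tolist())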
@@ -125,19 +123,19 @@ class RIVER(Unit):
             self.lat2 = labels[4]
             self.lat3 = labels[5]
             self.lat4 = labels[6]
-            self.comment = riv_block[0].replace("RIVER", "").strip()
+            self.comment = self._remove_unit_name(riv_block[0])

             params = split_10_char(f"{riv_block[3]:<40}")
-            self.dist_to_next = _to_float(params[0])
-            self.slope = _to_float(params[2], 0.0001)
-            self.density = _to_float(params[3], 1000.0)
+            self.dist_to_next = to_float(params[0])
+            self.slope = to_float(params[2], 0.0001)
+            self.density = to_float(params[3], 1000.0)
             self.nrows = int(split_10_char(riv_block[4])[0])
             data_list = []
             for row in riv_block[5:]:
                 row_split = split_10_char(f"{row:<100}")
-                x = _to_float(row_split[0])  # chainage
-                y = _to_float(row_split[1])  # elevation
-                n = _to_float(row_split[2])  # Mannings
+                x = to_float(row_split[0])  # chainage
+                y = to_float(row_split[1])  # elevation
+                n = to_float(row_split[2])  # Mannings
                 try:
                     # panel marker
                     panel = row_split[3][0] == "*"
@@ -146,15 +144,15 @@ class RIVER(Unit):

                 try:
                     # relative path length
-                    rpl = _to_float(row_split[3][1 if panel else 0 :].strip())
+                    rpl = to_float(row_split[3][1 if panel else 0 :].strip())
                 except IndexError:
                     rpl = 0.000
                 marker = row_split[4]  # Marker
-                easting = _to_float(row_split[5])  # easting
-                northing = _to_float(row_split[6])  # northing
+                easting = to_float(row_split[5])  # easting
+                northing = to_float(row_split[6])  # northing

                 deactivation = row_split[7]  # deactivation marker
-                sp_marker = _to_int(row_split[8])  # special marker
+                sp_marker = to_int(row_split[8])  # special marker
                 data_list.append(
                     [
                         x,
@@ -176,11 +174,14 @@ class RIVER(Unit):

         else:
             # This else block is triggered for river subtypes which aren't yet supported, and just keeps the 'riv_block' in it's raw state to write back.
-            print(
-                f'This River sub-type: "{self.subtype}" is currently unsupported for reading/editing',
+            logging.warning(
+                "This River sub-type: '%s' is currently unsupported for reading/editing",
+                self.subtype,
             )
             self._raw_block = riv_block
             self.name = riv_block[2][: self._label_len].strip()
+            self.dist_to_next = to_float(riv_block[3][:10])
+            self.labels = labels

         self._active_data = None

@@ -190,7 +191,7 @@ class RIVER(Unit):
         if self.subtype == "SECTION":
             # Function to check the params are valid for RIVER SECTION unit
             _validate_unit(self)
-            header = "RIVER " + self.comment
+            header = self._create_header()
             labels = join_n_char_ljust(
                 self._label_len,
                 self.name,
@@ -204,7 +205,7 @@ class RIVER(Unit):
             # Manual so slope can have more sf
             params = f'{self.dist_to_next:>10.3f}{"":>10}{self.slope:>10.6f}{self.density:>10.3f}'
             self.nrows = len(self._data)
-            riv_block = [header, self.subtype, labels, params, f"{str(self.nrows):>10}"]
+            riv_block = [header, self.subtype, labels, params, f"{self.nrows!s:>10}"]

             riv_data = []
             for (
@@ -257,11 +258,11 @@ class RIVER(Unit):
     @data.setter
     def data(self, new_df: pd.DataFrame) -> None:
         if not isinstance(new_df, pd.DataFrame):
-            raise ValueError(
-                "The updated data table for a cross section must be a pandas DataFrame.",
-            )
+            msg = "The updated data table for a cross section must be a pandas DataFrame."
+            raise ValueError(msg)
         if list(map(str.lower, new_df.columns)) != list(map(str.lower, self._required_columns)):
-            raise ValueError(f"The DataFrame must only contain columns: {self._required_columns}")
+            msg = f"The DataFrame must only contain columns: {self._required_columns}"
+            raise ValueError(msg)
         self._data = new_df

     @property
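Note: both setters keep the same validation (the new table must be a pandas DataFrame with exactly the required columns) and only move the message into a local variable before raising, likely a linter-driven cleanup. A hedged usage sketch of editing a section through the public DAT API; the file name and section label are hypothetical:

    from floodmodeller_api import DAT

    dat = DAT("network.dat")            # hypothetical .dat file
    section = dat.sections["S1"]        # hypothetical RIVER section label

    table = section.data.copy()         # round-tripping keeps the required columns intact
    table["Mannings n"] *= 1.1          # e.g. a global roughness uplift
    section.data = table                # passes the DataFrame/column validation above

    dat.save("network_updated.dat")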
@@ -325,11 +326,11 @@ class RIVER(Unit):
     @active_data.setter
     def active_data(self, new_df: pd.DataFrame) -> None:
         if not isinstance(new_df, pd.DataFrame):
-            raise ValueError(
-                "The updated data table for a cross section must be a pandas DataFrame.",
-            )
+            msg = "The updated data table for a cross section must be a pandas DataFrame."
+            raise ValueError(msg)
         if new_df.columns.to_list() != self._required_columns:
-            raise ValueError(f"The DataFrame must only contain columns: {self._required_columns}")
+            msg = f"The DataFrame must only contain columns: {self._required_columns}"
+            raise ValueError(msg)

         # Ensure activation markers are present
         new_df = new_df.copy()
@@ -380,19 +381,19 @@ class INTERPOLATE(Unit):
         self.lat2 = labels[4]
         self.lat3 = labels[5]
         self.lat4 = labels[6]
-        self.comment = block[0].replace("INTERPOLATE", "").strip()
+        self.comment = self._remove_unit_name(block[0])

         # First parameter line
         params1 = split_10_char(f"{block[2]:<30}")
-        self.dist_to_next = _to_float(params1[0])
-        self.easting = _to_float(params1[1])
-        self.northing = _to_float(params1[2])
+        self.dist_to_next = to_float(params1[0])
+        self.easting = to_float(params1[1])
+        self.northing = to_float(params1[2])

     def _write(self):
         """Function to write a valid INTERPOLATE block"""

         _validate_unit(self)
-        header = "INTERPOLATE " + self.comment
+        header = self._create_header()
         labels = join_n_char_ljust(
             self._label_len,
             self.name,
@@ -476,20 +477,20 @@ class REPLICATE(Unit):
         self.lat3 = labels[5]
         self.lat4 = labels[6]

-        self.comment = block[0].replace("REPLICATE", "").strip()
+        self.comment = self._remove_unit_name(block[0])

         # First parameter line
         params1 = split_10_char(f"{block[2]:<40}")
-        self.dist_to_next = _to_float(params1[0])
-        self.bed_level_drop = _to_float(params1[1])
-        self.easting = _to_float(params1[2])
-        self.northing = _to_float(params1[3])
+        self.dist_to_next = to_float(params1[0])
+        self.bed_level_drop = to_float(params1[1])
+        self.easting = to_float(params1[2])
+        self.northing = to_float(params1[3])

     def _write(self):
         """Function to write a valid REPLICATE block"""

         _validate_unit(self)
-        header = "REPLICATE " + self.comment
+        header = self._create_header()
         labels = join_n_char_ljust(
             self._label_len,
             self.name,