floodmodeller-api 0.5.0.post1__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. floodmodeller_api/__init__.py +11 -1
  2. floodmodeller_api/_base.py +55 -36
  3. floodmodeller_api/backup.py +15 -12
  4. floodmodeller_api/dat.py +191 -121
  5. floodmodeller_api/diff.py +4 -4
  6. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +15 -14
  7. floodmodeller_api/ied.py +8 -10
  8. floodmodeller_api/ief.py +56 -42
  9. floodmodeller_api/ief_flags.py +1 -1
  10. floodmodeller_api/inp.py +7 -10
  11. floodmodeller_api/logs/lf.py +25 -26
  12. floodmodeller_api/logs/lf_helpers.py +20 -20
  13. floodmodeller_api/logs/lf_params.py +1 -5
  14. floodmodeller_api/mapping.py +11 -2
  15. floodmodeller_api/test/__init__.py +2 -2
  16. floodmodeller_api/test/conftest.py +2 -3
  17. floodmodeller_api/test/test_backup.py +2 -2
  18. floodmodeller_api/test/test_conveyance.py +13 -7
  19. floodmodeller_api/test/test_dat.py +168 -20
  20. floodmodeller_api/test/test_data/EX18_DAT_expected.json +164 -144
  21. floodmodeller_api/test/test_data/EX3_DAT_expected.json +6 -2
  22. floodmodeller_api/test/test_data/EX6_DAT_expected.json +12 -46
  23. floodmodeller_api/test/test_data/encoding_test_cp1252.dat +1081 -0
  24. floodmodeller_api/test/test_data/encoding_test_utf8.dat +1081 -0
  25. floodmodeller_api/test/test_data/integrated_bridge/AR_NoSP_NoBl_2O_NO_OneFRC.ied +33 -0
  26. floodmodeller_api/test/test_data/integrated_bridge/AR_vSP_25pc_1O.ied +32 -0
  27. floodmodeller_api/test/test_data/integrated_bridge/PL_vSP_25pc_1O.ied +34 -0
  28. floodmodeller_api/test/test_data/integrated_bridge/SBTwoFRCsStaggered.IED +32 -0
  29. floodmodeller_api/test/test_data/integrated_bridge/US_NoSP_NoBl_OR_RN.ied +28 -0
  30. floodmodeller_api/test/test_data/integrated_bridge/US_SP_NoBl_OR_frc_PT2-5_RN.ied +34 -0
  31. floodmodeller_api/test/test_data/integrated_bridge/US_fSP_NoBl_1O.ied +30 -0
  32. floodmodeller_api/test/test_data/integrated_bridge/US_nSP_NoBl_1O.ied +49 -0
  33. floodmodeller_api/test/test_data/integrated_bridge/US_vSP_NoBl_2O_Para.ied +35 -0
  34. floodmodeller_api/test/test_data/integrated_bridge.dat +40 -0
  35. floodmodeller_api/test/test_data/network.ied +2 -2
  36. floodmodeller_api/test/test_data/network_dat_expected.json +141 -243
  37. floodmodeller_api/test/test_data/network_ied_expected.json +2 -2
  38. floodmodeller_api/test/test_data/network_with_comments.ied +2 -2
  39. floodmodeller_api/test/test_data/structure_logs/EX17_expected.csv +4 -0
  40. floodmodeller_api/test/test_data/structure_logs/EX17_expected.json +69 -0
  41. floodmodeller_api/test/test_data/structure_logs/EX18_expected.csv +20 -0
  42. floodmodeller_api/test/test_data/structure_logs/EX18_expected.json +292 -0
  43. floodmodeller_api/test/test_data/structure_logs/EX6_expected.csv +4 -0
  44. floodmodeller_api/test/test_data/structure_logs/EX6_expected.json +35 -0
  45. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_flow.csv +182 -0
  46. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_fr.csv +182 -0
  47. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_mode.csv +182 -0
  48. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_stage.csv +182 -0
  49. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_state.csv +182 -0
  50. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_velocity.csv +182 -0
  51. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_h.csv +182 -0
  52. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_mode.csv +182 -0
  53. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_link_inflow.csv +182 -0
  54. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_max.csv +87 -0
  55. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_h.csv +182 -0
  56. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_mode.csv +182 -0
  57. floodmodeller_api/test/test_flowtimeprofile.py +2 -2
  58. floodmodeller_api/test/test_hydrology_plus_export.py +4 -2
  59. floodmodeller_api/test/test_ied.py +3 -3
  60. floodmodeller_api/test/test_ief.py +12 -4
  61. floodmodeller_api/test/test_inp.py +2 -2
  62. floodmodeller_api/test/test_integrated_bridge.py +159 -0
  63. floodmodeller_api/test/test_json.py +14 -13
  64. floodmodeller_api/test/test_logs_lf.py +50 -29
  65. floodmodeller_api/test/test_read_file.py +1 -0
  66. floodmodeller_api/test/test_river.py +12 -12
  67. floodmodeller_api/test/test_tool.py +8 -5
  68. floodmodeller_api/test/test_toolbox_structure_log.py +148 -158
  69. floodmodeller_api/test/test_xml2d.py +14 -16
  70. floodmodeller_api/test/test_zz.py +143 -0
  71. floodmodeller_api/to_from_json.py +9 -9
  72. floodmodeller_api/tool.py +15 -11
  73. floodmodeller_api/toolbox/example_tool.py +5 -1
  74. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +13 -9
  75. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +500 -194
  76. floodmodeller_api/toolbox/model_build/structure_log_definition.py +5 -1
  77. floodmodeller_api/units/__init__.py +15 -0
  78. floodmodeller_api/units/_base.py +87 -20
  79. floodmodeller_api/units/_helpers.py +343 -0
  80. floodmodeller_api/units/boundaries.py +59 -71
  81. floodmodeller_api/units/comment.py +1 -1
  82. floodmodeller_api/units/conduits.py +57 -54
  83. floodmodeller_api/units/connectors.py +112 -0
  84. floodmodeller_api/units/controls.py +107 -0
  85. floodmodeller_api/units/conveyance.py +1 -1
  86. floodmodeller_api/units/iic.py +2 -9
  87. floodmodeller_api/units/losses.py +44 -45
  88. floodmodeller_api/units/sections.py +52 -51
  89. floodmodeller_api/units/structures.py +361 -531
  90. floodmodeller_api/units/units.py +27 -26
  91. floodmodeller_api/units/unsupported.py +5 -7
  92. floodmodeller_api/units/variables.py +2 -2
  93. floodmodeller_api/urban1d/_base.py +13 -17
  94. floodmodeller_api/urban1d/conduits.py +11 -21
  95. floodmodeller_api/urban1d/general_parameters.py +1 -1
  96. floodmodeller_api/urban1d/junctions.py +7 -11
  97. floodmodeller_api/urban1d/losses.py +13 -17
  98. floodmodeller_api/urban1d/outfalls.py +18 -22
  99. floodmodeller_api/urban1d/raingauges.py +5 -10
  100. floodmodeller_api/urban1d/subsections.py +5 -4
  101. floodmodeller_api/urban1d/xsections.py +14 -17
  102. floodmodeller_api/util.py +23 -6
  103. floodmodeller_api/validation/parameters.py +7 -3
  104. floodmodeller_api/validation/urban_parameters.py +1 -4
  105. floodmodeller_api/validation/validation.py +11 -5
  106. floodmodeller_api/version.py +1 -1
  107. floodmodeller_api/xml2d.py +27 -31
  108. floodmodeller_api/xml2d_template.py +1 -1
  109. floodmodeller_api/zz.py +539 -0
  110. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/LICENSE.txt +1 -1
  111. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/METADATA +30 -16
  112. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/RECORD +116 -83
  113. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/WHEEL +1 -1
  114. floodmodeller_api/test/test_zzn.py +0 -36
  115. floodmodeller_api/units/helpers.py +0 -123
  116. floodmodeller_api/zzn.py +0 -414
  117. /floodmodeller_api/test/test_data/{network_from_tabularCSV.csv → tabular_csv_outputs/network_zzn_max.csv} +0 -0
  118. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/entry_points.txt +0 -0
  119. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/top_level.txt +0 -0
@@ -1,3 +1,7 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import ClassVar
4
+
1
5
  from floodmodeller_api.tool import FMTool, Parameter
2
6
 
3
7
  from .structure_log import StructureLogBuilder
@@ -50,7 +54,7 @@ class StructureLog(FMTool):
50
54
 
51
55
  name = "Structure Log"
52
56
  description = "Creates a structure log"
53
- parameters = [
57
+ parameters: ClassVar[list[Parameter]] = [
54
58
  Parameter(
55
59
  name="input_path",
56
60
  dtype=str,
@@ -1,6 +1,10 @@
1
+ from typing import TypeAlias
2
+
1
3
  from .boundaries import HTBDY, QHBDY, QTBDY, REFHBDY
2
4
  from .comment import COMMENT
3
5
  from .conduits import CONDUIT
6
+ from .connectors import JUNCTION, LATERAL
7
+ from .controls import RESERVOIR
4
8
  from .iic import IIC
5
9
  from .losses import BLOCKAGE, CULVERT
6
10
  from .sections import INTERPOLATE, REPLICATE, RIVER
@@ -18,3 +22,14 @@ from .structures import (
18
22
  from .units import ALL_UNIT_TYPES, SUPPORTED_UNIT_TYPES, UNSUPPORTED_UNIT_TYPES
19
23
  from .unsupported import UNSUPPORTED
20
24
  from .variables import Variables
25
+
26
+ TBoundaries: TypeAlias = HTBDY | QHBDY | QTBDY | REFHBDY
27
+ TSections: TypeAlias = INTERPOLATE | REPLICATE | RIVER
28
+ TConduits: TypeAlias = CONDUIT
29
+ TConnectors: TypeAlias = JUNCTION | LATERAL
30
+ TControls: TypeAlias = RESERVOIR
31
+ TLosses: TypeAlias = BLOCKAGE | CULVERT
32
+ TStructures: TypeAlias = (
33
+ BRIDGE | CRUMP | FLAT_V_WEIR | ORIFICE | OUTFALL | RNWEIR | SLUICE | SPILL | WEIR
34
+ )
35
+ TUnsupported: TypeAlias = UNSUPPORTED
@@ -1,6 +1,6 @@
1
1
  """
2
2
  Flood Modeller Python API
3
- Copyright (C) 2024 Jacobs U.K. Limited
3
+ Copyright (C) 2025 Jacobs U.K. Limited
4
4
 
5
5
  This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
6
  as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
@@ -18,11 +18,15 @@ from __future__ import annotations
18
18
 
19
19
  """ Holds the base unit class for all FM Units """
20
20
 
21
+ import logging
22
+ from itertools import chain
23
+ from typing import Any
24
+
21
25
  import pandas as pd
22
26
 
23
27
  from ..diff import check_item_with_dataframe_equal
24
28
  from ..to_from_json import Jsonable
25
- from .helpers import _to_float, _to_str, join_10_char, join_n_char_ljust, split_10_char
29
+ from ._helpers import join_10_char, join_n_char_ljust, split_10_char, to_float, to_int, to_str
26
30
 
27
31
 
28
32
  class Unit(Jsonable):
@@ -40,7 +44,11 @@ class Unit(Jsonable):
40
44
  self._create_from_blank(**kwargs)
41
45
 
42
46
  @property
43
- def name(self):
47
+ def unit(self) -> str:
48
+ return self._unit
49
+
50
+ @property
51
+ def name(self) -> str | None:
44
52
  return self._name
45
53
 
46
54
  @name.setter
@@ -48,20 +56,51 @@ class Unit(Jsonable):
48
56
  try:
49
57
  new_name = str(new_name)
50
58
  if " " in new_name:
51
- raise Exception(
52
- f'Cannot set unit name to "{new_name}" as it contains one or more spaces',
53
- )
59
+ msg = f'Cannot set unit name to "{new_name}" as it contains one or more spaces'
60
+ raise Exception(msg)
54
61
  self._name = new_name
55
62
  except Exception as e:
56
- raise Exception(f'Failed to set unit name to "{new_name}" due to error: {e}') from e
63
+ msg = f'Failed to set unit name to "{new_name}" due to error: {e}'
64
+ raise Exception(msg) from e
65
+
66
+ @property
67
+ def all_labels(self) -> set[str]:
68
+ """All explicit labels associated with a unit."""
69
+ label_attrs = [
70
+ "name",
71
+ "spill",
72
+ "spill1",
73
+ "spill2",
74
+ "first_spill",
75
+ "second_spill",
76
+ "lat1",
77
+ "lat2",
78
+ "lat3",
79
+ "lat4",
80
+ "ds_label",
81
+ ]
82
+ label_list_attrs = ["labels", "lateral_inflow_labels"]
83
+
84
+ labels = {getattr(self, x) for x in label_attrs if hasattr(self, x)}
85
+ label_lists = [getattr(self, x) for x in label_list_attrs if hasattr(self, x)]
86
+
87
+ return (labels | set(chain(*label_lists))) - {""}
57
88
 
58
89
  @property
59
- def subtype(self):
90
+ def unique_name(self) -> str:
91
+ if self._name is None:
92
+ msg = "No unique name available."
93
+ raise ValueError(msg)
94
+ return f"{self._unit}_{self._name}"
95
+
96
+ @property
97
+ def subtype(self) -> str | None:
60
98
  return self._subtype
61
99
 
62
100
  @subtype.setter
63
101
  def subtype(self, new_value):
64
- raise ValueError("You cannot change the subtype of a unit once it has been instantiated")
102
+ msg = "You cannot change the subtype of a unit once it has been instantiated"
103
+ raise ValueError(msg)
65
104
 
66
105
  def __repr__(self):
67
106
  if self._subtype is None:
@@ -71,9 +110,8 @@ class Unit(Jsonable):
71
110
  )
72
111
 
73
112
  def _create_from_blank(self):
74
- raise NotImplementedError(
75
- f"Creating new {self._unit} units is not yet supported by floodmodeller_api, only existing units can be read",
76
- )
113
+ msg = f"Creating new {self._unit} units is not yet supported by floodmodeller_api, only existing units can be read"
114
+ raise NotImplementedError(msg)
77
115
 
78
116
  def __str__(self):
79
117
  return "\n".join(self._write())
@@ -87,14 +125,17 @@ class Unit(Jsonable):
87
125
  def _diff(self, other):
88
126
  diff = self._get_diff(other)
89
127
  if diff[0]:
90
- print("No difference, units are equivalent")
128
+ logging.info("No difference, units are equivalent")
91
129
  else:
92
- print("\n".join([f"{name}: {reason}" for name, reason in diff[1]]))
130
+ logging.info("\n".join([f"{name}: {reason}" for name, reason in diff[1]]))
93
131
 
94
132
  def _get_diff(self, other):
95
133
  return self.__eq__(other, return_diff=True) # pylint: disable=unnecessary-dunder-call
96
134
 
97
135
  def __eq__(self, other, return_diff=False):
136
+ if not isinstance(other, Unit):
137
+ return NotImplemented if not return_diff else (False, ["Type mismatch"])
138
+
98
139
  result = True
99
140
  diff = []
100
141
  result, diff = check_item_with_dataframe_equal(
@@ -110,16 +151,16 @@ class Unit(Jsonable):
110
151
  def _read_rules(self, block):
111
152
  rule_params = split_10_char(block[self._last_gate_row + 1])
112
153
  self.nrules = int(rule_params[0])
113
- self.rule_sample_time = _to_float(rule_params[1])
114
- self.timeunit = _to_str(rule_params[2], "SECONDS", check_float=False)
115
- self.extendmethod = _to_str(rule_params[3], "EXTEND")
154
+ self.rule_sample_time = to_float(rule_params[1])
155
+ self.timeunit = to_str(rule_params[2], "SECONDS", check_float=False)
156
+ self.extendmethod = to_str(rule_params[3], "EXTEND")
116
157
  self.rules = self._get_logical_rules(self.nrules, block, self._last_gate_row + 2)
117
158
  # Get time rule data set
118
159
  nrows = int(split_10_char(block[self._last_rule_row + 1])[0])
119
160
  data_list = []
120
161
  for row in block[self._last_rule_row + 2 : self._last_rule_row + 2 + nrows]:
121
162
  row_split = split_10_char(f"{row:<20}")
122
- x = _to_float(row_split[0]) # time
163
+ x = to_float(row_split[0]) # time
123
164
  y = row[10:].strip() # operating rules
124
165
  data_list.append([x, y])
125
166
  self._last_time_row = self._last_rule_row + nrows + 1
@@ -135,14 +176,14 @@ class Unit(Jsonable):
135
176
  self.has_varrules = True
136
177
  varrule_params = split_10_char(block[self._last_time_row + 2])
137
178
  self.nvarrules = int(varrule_params[0])
138
- self.varrule_sample_time = _to_float(rule_params[1])
179
+ self.varrule_sample_time = to_float(rule_params[1])
139
180
  self.varrules = self._get_logical_rules(self.nvarrules, block, self._last_time_row + 3)
140
181
  # Get time rule data set
141
182
  var_nrows = int(split_10_char(block[self._last_rule_row + 1])[0])
142
183
  data_list = []
143
184
  for row in block[self._last_rule_row + 2 : self._last_rule_row + 2 + var_nrows]:
144
185
  row_split = split_10_char(f"{row:<20}")
145
- x = _to_float(row_split[0]) # time
186
+ x = to_float(row_split[0]) # time
146
187
  y = row[10:].strip() # operating rules
147
188
  data_list.append([x, y])
148
189
 
@@ -212,3 +253,29 @@ class Unit(Jsonable):
212
253
  self._last_rule_row = rule_row
213
254
 
214
255
  return rules
256
+
257
+ def _remove_unit_name(self, line: str, *, remove_revision: bool = False) -> str:
258
+ line = line.replace(self._unit, "")
259
+ if remove_revision:
260
+ line = line.replace("#revision#", "", 1)
261
+ return line.strip()
262
+
263
+ def _create_header(self, *, include_revision: bool = False) -> str:
264
+ header = self._unit
265
+ if include_revision and hasattr(self, "_revision"):
266
+ header += f" #revision#{self._revision}"
267
+ if hasattr(self, "comment") and self.comment != "":
268
+ header += f" {self.comment}"
269
+ return header
270
+
271
+ def _get_first_word(self, line: str) -> str:
272
+ return line.split(" ")[0].strip()
273
+
274
+ def _get_revision_and_comment(self, line: str) -> tuple[int | None, str]:
275
+ line_without_name = self._remove_unit_name(line, remove_revision=True)
276
+ revision = to_int(line_without_name[0], None) if line_without_name != "" else None
277
+ comment = line_without_name[1:].strip()
278
+ return revision, comment
279
+
280
+ def _enforce_dataframe(self, data: Any, columns: tuple[str, ...]) -> pd.DataFrame:
281
+ return data if isinstance(data, pd.DataFrame) else pd.DataFrame([], columns=columns)
@@ -0,0 +1,343 @@
1
+ """
2
+ Flood Modeller Python API
3
+ Copyright (C) 2025 Jacobs U.K. Limited
4
+
5
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
+
8
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
+
11
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
+
13
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import copy
20
+ from itertools import chain
21
+ from typing import Any, Callable
22
+
23
+ import pandas as pd
24
+
25
NOTATION_THRESHOLD = 10


def split_10_char(line: str) -> list[str]:
    """Split *line* into stripped 10-character fields."""
    return split_n_char(line, 10)


def split_12_char(line: str) -> list[str]:
    """Split *line* into stripped 12-character fields."""
    return split_n_char(line, 12)


def split_n_char(line: str, n: int) -> list[str]:
    """Chop *line* into consecutive *n*-character fields, stripping each one."""
    fields = []
    for start in range(0, len(line), n):
        fields.append(line[start : start + n].strip())
    return fields


def _stringify(itm, dp):
    """Render a single value for fixed-width output.

    ``None`` becomes an empty field; floats are written to *dp* decimal
    places, switching to scientific notation when the fixed form would
    exceed NOTATION_THRESHOLD characters; everything else is str()'d.
    """
    if itm is None:
        return ""
    if isinstance(itm, float):
        fixed = f"{itm:.{dp}f}"
        if len(fixed) > NOTATION_THRESHOLD:
            return f"{itm:.{dp}e}"
        return fixed
    return str(itm)


def join_10_char(*itms, dp=3):
    """Joins a set of values with a 10 character buffer and right-justified"""
    return "".join(f"{_stringify(itm, dp)[:10]:>10}" for itm in itms)


def join_12_char_ljust(*itms, dp=3):
    """Joins a set of values with a 12 character buffer and left-justified"""
    return join_n_char_ljust(12, *itms, dp=dp)


def join_n_char_ljust(n, *itms, dp=3):
    """Joins a set of values with a n character buffer and left-justified"""
    # Note: the scientific-notation cutoff is always NOTATION_THRESHOLD,
    # not n, matching the 10-char variant.
    return "".join(f"{_stringify(itm, dp)[:n]:<{n}}" for itm in itms)
75
+
76
+
77
def to_float(itm, default=0.0):
    """Convert *itm* to ``float``, returning *default* when it cannot be.

    Catches TypeError as well as ValueError so that ``None`` (and other
    non-castable objects) also fall back to the default instead of raising.
    """
    try:
        return float(itm)
    except (TypeError, ValueError):
        return default


def to_int(itm, default=0):
    """Convert *itm* to ``int``, returning *default* when it cannot be.

    As with :func:`to_float`, ``None`` and other non-castable values fall
    back to the default rather than raising TypeError.
    """
    try:
        return int(itm)
    except (TypeError, ValueError):
        return default


def to_str(itm, default, check_float=False):
    """Return *itm*, or *default* when *itm* is the empty string.

    When *check_float* is True and *itm* parses as a number, the parsed
    float is returned instead of the string.
    """
    if check_float:
        try:
            return float(itm)
        except (TypeError, ValueError):
            pass
    if itm == "":
        return default
    return itm
100
+
101
+
102
def to_data_list(block: list[str], num_cols: int | None = None, date_col: int | None = None):
    """Parse fixed-width rows into lists of floats.

    When *date_col* is given, the two 10-char fields at that position are
    re-joined into a single date/time string column; *num_cols* (if set)
    caps the number of fields read per row.
    """
    # A date/time occupies two raw fields, so widen the cap by one.
    if num_cols is not None and date_col is not None:
        num_cols += 1
    parsed_rows = []
    for raw in block:
        fields = split_10_char(raw)
        if num_cols is not None:
            fields = fields[:num_cols]
        if date_col is None:
            parsed_rows.append([to_float(field) for field in fields])
            continue
        # Stitch the two date/time fields back into one string column,
        # converting every other field to float.
        stamp = " ".join(fields[date_col : date_col + 2])
        values = [
            to_float(field)
            for position, field in enumerate(fields)
            if position not in (date_col, date_col + 1)
        ]
        values.insert(date_col, stamp)
        parsed_rows.append(values)
    return parsed_rows
122
+
123
+
124
def set_bridge_params(obj: Any, line: str, *, include_pier: bool = True) -> None:
    """Populate the shared bridge attributes of *obj* from one fixed-width line.

    The line is padded to 90 characters (nine 10-char fields) before
    splitting, so missing trailing fields parse as defaults.
    """
    fields = split_10_char(f"{line:<90}")
    obj.calibration_coefficient = to_float(fields[0], 1.0)
    obj.skew = to_float(fields[1])
    obj.bridge_width_dual = to_float(fields[2])
    obj.bridge_dist_dual = to_float(fields[3])
    if include_pier:
        # Field 4 is only meaningful for bridge types that specify piers.
        obj.total_pier_width = to_float(fields[4])
    obj.orifice_flow = fields[5] == "ORIFICE"
    obj.orifice_lower_transition_dist = to_float(fields[6])
    obj.orifice_upper_transition_dist = to_float(fields[7])
    obj.orifice_discharge_coefficient = to_float(fields[8], 1.0)
136
+
137
+
138
def set_pier_params(obj: Any, line: str) -> None:
    """Populate pier/soffit attributes of *obj* from one fixed-width line."""
    info = split_10_char(line)
    pier_count = int(info[0])
    if pier_count <= 0:
        # No piers specified: the second field holds the soffit shape.
        obj.specify_piers = False
        obj.soffit_shape = info[1]
        return
    obj.specify_piers = True
    obj.npiers = pier_count
    if info[1] == "COEFF":
        obj.pier_use_calibration_coeff = True
        # NOTE(review): coefficient is read from field 3, skipping field 2 —
        # presumably a filler field in the file format; confirm against spec.
        obj.pier_calibration_coeff = to_float(info[3])
    else:
        obj.pier_use_calibration_coeff = False
        obj.pier_shape = info[1]
        obj.pier_faces = info[2]
153
+
154
+
155
def read_dataframe_from_lines(
    all_lines: list[str],
    end_idx: int,
    read_lines: Callable[[list[str]], pd.DataFrame],
    *args,
    **kwargs,
) -> tuple[int, int, pd.DataFrame]:
    """Read a count-prefixed table from *all_lines*.

    The line at *end_idx* holds the row count; the following rows are fed
    to *read_lines*. Returns (row count, index just past the table, frame).
    """
    row_count = get_int(all_lines[end_idx])
    first = end_idx + 1
    last = first + row_count
    frame = read_lines(all_lines[first:last], *args, **kwargs)
    return row_count, last, frame
167
+
168
+
169
def read_bridge_cross_sections(
    lines: list[str],
    *,
    include_panel_marker: bool = False,
    include_top_level: bool = False,
) -> pd.DataFrame:
    """Parse bridge cross-section rows into a dataframe.

    Columns are X / Y / Mannings n, optionally a panel marker (field 3),
    the embankment flag (field 4), and optionally a top level (field 5).

    Fix: the line must be padded to 60 characters when *include_top_level*
    is set — padding only to 50 yields exactly five fields for short lines,
    so reading ``line_split[5]`` raised IndexError.
    """
    pad_width = 60 if include_top_level else 50
    data_list = []
    for line in lines:
        line_split = split_10_char(f"{line:<{pad_width}}")
        df_row = [
            to_float(line_split[0]),
            to_float(line_split[1]),
            to_float(line_split[2]),
        ]

        if include_panel_marker:
            df_row.append(line_split[3])

        df_row.append(line_split[4])

        if include_top_level:
            df_row.append(line_split[5])

        data_list.append(df_row)

    columns = ["X", "Y", "Mannings n"]

    if include_panel_marker:
        columns.append("Panel")

    columns.append("Embankments")

    if include_top_level:
        columns.append("Top Level")
    return pd.DataFrame(data_list, columns=columns)
204
+
205
+
206
def read_bridge_opening_data(lines: list[str]) -> pd.DataFrame:
    """Parse bridge opening rows (start/finish chainage, springing and
    soffit levels) into a dataframe."""
    rows = []
    for line in lines:
        fields = split_10_char(f"{line:<40}")
        rows.append([to_float(fields[i]) for i in range(4)])
    return pd.DataFrame(rows, columns=["Start", "Finish", "Springing Level", "Soffit Level"])
216
+
217
+
218
def read_bridge_culvert_data(lines: list[str]) -> pd.DataFrame:
    """Parse culvert rows of an integrated bridge into a dataframe."""
    headers = [
        "Invert",
        "Soffit",
        "Section Area",
        "Cd Part Full",
        "Cd Full",
        "Drowning Coefficient",
        "X",
    ]
    rows = []
    for line in lines:
        # Pad to seven 10-char fields so short rows parse as zeros.
        fields = split_10_char(f"{line:<70}")
        rows.append([to_float(field) for field in fields[:7]])
    return pd.DataFrame(rows, columns=headers)
242
+
243
+
244
def read_bridge_pier_locations(lines: list[str]) -> pd.DataFrame:
    """Parse pier-location rows (left/right X positions and top levels)."""
    rows = []
    for line in lines:
        fields = split_10_char(f"{line:<40}")
        rows.append([to_float(field) for field in fields[:4]])
    return pd.DataFrame(
        rows,
        columns=["Left X", "Left Top Level", "Right X", "Right Top Level"],
    )
257
+
258
+
259
def read_spill_section_data(lines: list[str]) -> pd.DataFrame:
    """Parse spill section rows (chainage, elevation, easting, northing)."""
    rows = []
    for line in lines:
        fields = split_10_char(f"{line:<40}")
        rows.append([to_float(value) for value in fields[:4]])
    return pd.DataFrame(rows, columns=["X", "Y", "Easting", "Northing"])
269
+
270
+
271
def read_superbridge_opening_data(lines: list[str]) -> pd.DataFrame:
    """Parse superbridge opening rows into X/Z coordinate pairs."""
    rows = []
    for line in lines:
        fields = split_10_char(f"{line:<20}")
        rows.append([to_float(fields[0]), to_float(fields[1])])
    return pd.DataFrame(rows, columns=["X", "Z"])
279
+
280
+
281
def read_superbridge_block_data(lines: list[str]) -> pd.DataFrame:
    """Parse superbridge blockage rows (percentage, time, datetime)."""
    rows = []
    for line in lines:
        fields = split_10_char(f"{line:<30}")
        # First field is an integer percentage; the other two parse as floats.
        rows.append([to_int(fields[0]), to_float(fields[1]), to_float(fields[2])])
    return pd.DataFrame(rows, columns=["percentage", "time", "datetime"])
290
+
291
+
292
def read_lateral_data(lines: list[str]) -> pd.DataFrame:
    """Parse lateral inflow rows (node label, weight factor, use flag).

    Lateral tables use 12-character fields rather than the usual 10.
    """
    rows = []
    for line in lines:
        fields = split_12_char(f"{line:<36}")
        rows.append([fields[0], to_float(fields[1]), fields[2]])
    return pd.DataFrame(
        rows,
        columns=["Node Label", "Custom Weight Factor", "Use Weight Factor"],
    )
302
+
303
+
304
def read_reservoir_data(lines: list[str]) -> pd.DataFrame:
    """Parse reservoir stage/storage rows (elevation vs plan area)."""
    rows = []
    for line in lines:
        fields = split_10_char(f"{line:<20}")
        rows.append([to_float(fields[0]), to_float(fields[1])])
    return pd.DataFrame(rows, columns=["Elevation", "Plan Area"])
313
+
314
+
315
def get_int(line: str) -> int:
    """Read the first 10-char field of *line* as an integer, tolerating
    values written in float form (e.g. ``'5.000'``)."""
    first_field = split_10_char(line)[0]
    return int(float(first_field))
317
+
318
+
319
def write_dataframe(
    header: int | str | None,
    df: pd.DataFrame,
    empty: int | None = None,
    n: int = 10,
) -> list[str]:
    """Serialise *df* to fixed-width lines, optionally prefixed by *header*.

    When *empty* is given, a blank filler column is inserted at that
    position before writing so the output leaves that field empty.
    """
    # DataFrame.copy() is the idiomatic (and much cheaper) way to avoid
    # mutating the caller's frame; copy.deepcopy on a DataFrame is overkill.
    df_to_use = df.copy()
    if empty is not None:
        df_to_use.insert(empty, "_", [None] * len(df_to_use))
    lines = [join_n_char_ljust(n, *row) for row in df_to_use.itertuples(index=False)]
    if header is not None:
        lines = [str(header), *lines]
    return lines
332
+
333
+
334
def write_dataframes(
    header: int | str | None,
    subheaders: list[int],
    df_list: list[pd.DataFrame],
) -> list[str]:
    """Serialise several dataframes, each prefixed by its subheader, and
    optionally prefix the whole block with *header*."""
    lines: list[str] = []
    for subheader, frame in zip(subheaders, df_list):
        lines.extend(write_dataframe(subheader, frame))
    if header is not None:
        lines.insert(0, str(header))
    return lines