floodmodeller-api 0.5.1__py3-none-any.whl → 0.5.2.post1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (86)
  1. floodmodeller_api/__init__.py +10 -0
  2. floodmodeller_api/_base.py +29 -20
  3. floodmodeller_api/backup.py +12 -10
  4. floodmodeller_api/dat.py +161 -91
  5. floodmodeller_api/diff.py +1 -1
  6. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +1 -1
  7. floodmodeller_api/ied.py +2 -4
  8. floodmodeller_api/ief.py +29 -17
  9. floodmodeller_api/ief_flags.py +1 -1
  10. floodmodeller_api/inp.py +4 -6
  11. floodmodeller_api/logs/lf.py +18 -12
  12. floodmodeller_api/logs/lf_helpers.py +2 -2
  13. floodmodeller_api/logs/lf_params.py +1 -5
  14. floodmodeller_api/mapping.py +9 -2
  15. floodmodeller_api/test/test_conveyance.py +9 -4
  16. floodmodeller_api/test/test_dat.py +166 -18
  17. floodmodeller_api/test/test_data/EX18_DAT_expected.json +164 -144
  18. floodmodeller_api/test/test_data/EX3_DAT_expected.json +6 -2
  19. floodmodeller_api/test/test_data/EX6_DAT_expected.json +12 -46
  20. floodmodeller_api/test/test_data/encoding_test_cp1252.dat +1081 -0
  21. floodmodeller_api/test/test_data/encoding_test_utf8.dat +1081 -0
  22. floodmodeller_api/test/test_data/integrated_bridge/AR_NoSP_NoBl_2O_NO_OneFRC.ied +33 -0
  23. floodmodeller_api/test/test_data/integrated_bridge/AR_vSP_25pc_1O.ied +32 -0
  24. floodmodeller_api/test/test_data/integrated_bridge/PL_vSP_25pc_1O.ied +34 -0
  25. floodmodeller_api/test/test_data/integrated_bridge/SBTwoFRCsStaggered.IED +32 -0
  26. floodmodeller_api/test/test_data/integrated_bridge/US_NoSP_NoBl_OR_RN.ied +28 -0
  27. floodmodeller_api/test/test_data/integrated_bridge/US_SP_NoBl_OR_frc_PT2-5_RN.ied +34 -0
  28. floodmodeller_api/test/test_data/integrated_bridge/US_fSP_NoBl_1O.ied +30 -0
  29. floodmodeller_api/test/test_data/integrated_bridge/US_nSP_NoBl_1O.ied +49 -0
  30. floodmodeller_api/test/test_data/integrated_bridge/US_vSP_NoBl_2O_Para.ied +35 -0
  31. floodmodeller_api/test/test_data/integrated_bridge.dat +40 -0
  32. floodmodeller_api/test/test_data/network.ied +2 -2
  33. floodmodeller_api/test/test_data/network_dat_expected.json +141 -243
  34. floodmodeller_api/test/test_data/network_ied_expected.json +2 -2
  35. floodmodeller_api/test/test_data/network_with_comments.ied +2 -2
  36. floodmodeller_api/test/test_ied.py +1 -1
  37. floodmodeller_api/test/test_ief.py +10 -2
  38. floodmodeller_api/test/test_integrated_bridge.py +159 -0
  39. floodmodeller_api/test/test_json.py +9 -3
  40. floodmodeller_api/test/test_logs_lf.py +45 -24
  41. floodmodeller_api/test/test_river.py +1 -1
  42. floodmodeller_api/test/test_toolbox_structure_log.py +0 -1
  43. floodmodeller_api/test/test_xml2d.py +5 -5
  44. floodmodeller_api/to_from_json.py +1 -1
  45. floodmodeller_api/tool.py +3 -5
  46. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +1 -1
  47. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +10 -8
  48. floodmodeller_api/units/__init__.py +15 -0
  49. floodmodeller_api/units/_base.py +73 -10
  50. floodmodeller_api/units/_helpers.py +343 -0
  51. floodmodeller_api/units/boundaries.py +59 -71
  52. floodmodeller_api/units/comment.py +1 -1
  53. floodmodeller_api/units/conduits.py +57 -54
  54. floodmodeller_api/units/connectors.py +112 -0
  55. floodmodeller_api/units/controls.py +107 -0
  56. floodmodeller_api/units/iic.py +2 -9
  57. floodmodeller_api/units/losses.py +42 -42
  58. floodmodeller_api/units/sections.py +40 -43
  59. floodmodeller_api/units/structures.py +360 -530
  60. floodmodeller_api/units/units.py +25 -26
  61. floodmodeller_api/units/unsupported.py +5 -7
  62. floodmodeller_api/units/variables.py +2 -2
  63. floodmodeller_api/urban1d/_base.py +7 -8
  64. floodmodeller_api/urban1d/conduits.py +11 -21
  65. floodmodeller_api/urban1d/general_parameters.py +1 -1
  66. floodmodeller_api/urban1d/junctions.py +7 -11
  67. floodmodeller_api/urban1d/losses.py +13 -17
  68. floodmodeller_api/urban1d/outfalls.py +16 -21
  69. floodmodeller_api/urban1d/raingauges.py +3 -9
  70. floodmodeller_api/urban1d/subsections.py +3 -4
  71. floodmodeller_api/urban1d/xsections.py +11 -15
  72. floodmodeller_api/util.py +7 -4
  73. floodmodeller_api/validation/parameters.py +7 -3
  74. floodmodeller_api/validation/urban_parameters.py +1 -4
  75. floodmodeller_api/validation/validation.py +9 -4
  76. floodmodeller_api/version.py +1 -1
  77. floodmodeller_api/xml2d.py +9 -11
  78. floodmodeller_api/xml2d_template.py +1 -1
  79. floodmodeller_api/zz.py +7 -6
  80. {floodmodeller_api-0.5.1.dist-info → floodmodeller_api-0.5.2.post1.dist-info}/LICENSE.txt +1 -1
  81. {floodmodeller_api-0.5.1.dist-info → floodmodeller_api-0.5.2.post1.dist-info}/METADATA +11 -3
  82. {floodmodeller_api-0.5.1.dist-info → floodmodeller_api-0.5.2.post1.dist-info}/RECORD +85 -70
  83. {floodmodeller_api-0.5.1.dist-info → floodmodeller_api-0.5.2.post1.dist-info}/WHEEL +1 -1
  84. floodmodeller_api/units/helpers.py +0 -121
  85. {floodmodeller_api-0.5.1.dist-info → floodmodeller_api-0.5.2.post1.dist-info}/entry_points.txt +0 -0
  86. {floodmodeller_api-0.5.1.dist-info → floodmodeller_api-0.5.2.post1.dist-info}/top_level.txt +0 -0
floodmodeller_api/test/test_integrated_bridge.py ADDED
@@ -0,0 +1,159 @@
+ from __future__ import annotations
+
+ import re
+ from pathlib import Path
+ from typing import TYPE_CHECKING
+
+ import pandas as pd
+ import pytest
+
+ from floodmodeller_api.units import BRIDGE
+
+ if TYPE_CHECKING:
+     from pathlib import Path
+
+
+ def create_bridge(path: Path) -> BRIDGE:
+     with open(path) as file:
+         lines = [line.rstrip("\n") for line in file]
+     return BRIDGE(lines)
+
+
+ @pytest.fixture()
+ def folder(test_workspace: Path) -> Path:
+     return test_workspace / "integrated_bridge"
+
+
+ def test_read_bridge(folder: Path):  # noqa: PLR0915 (all needed)
+     unit = create_bridge(folder / "US_vSP_NoBl_2O_Para.ied")
+
+     assert unit.comment == "prototype for rev 3 / No Spill data, no blockage"
+
+     assert unit.name == "Label11"
+     assert unit.ds_label == "Label12"
+     assert unit.us_remote_label == "CH0001"
+     assert unit.ds_remote_label == "CH0002"
+
+     assert unit.revision == 3
+     assert unit.bridge_name == "Clifton Suspension Bridge"
+     assert unit.integrated_subtype == "USBPR"
+
+     assert unit.calibration_coefficient == 1
+     assert unit.skew == 0
+     assert unit.bridge_width_dual == 0
+     assert unit.bridge_dist_dual == 0
+     assert unit.total_pier_width == 0
+     assert unit.orifice_flow is True
+     assert unit.orifice_lower_transition_dist == 0.3
+     assert unit.orifice_upper_transition_dist == 0.1
+     assert unit.orifice_discharge_coefficient == 1
+
+     assert unit.abutment_type == 3
+     assert unit.specify_piers is False
+     assert unit.soffit_shape == "FLAT"
+
+     assert unit.aligned is True
+
+     assert unit.section_nrows_list == [4, 0, 0, 0]
+
+     assert unit.opening_type == "PARABOLA1"
+     assert unit.opening_nrows == 2
+     assert unit.opening_nsubrows_list == [3, 3]
+
+     assert unit.culvert_nrows == 0
+
+     assert unit.spill_nrows == 3
+     assert unit.weir_coefficient == 1.7
+     assert unit.modular_limit == 0.9
+
+     assert unit.block_nrows == 0
+     assert unit.inlet_loss == 0.5
+     assert unit.outlet_loss == 1
+     assert unit.block_method == "USDEPTH"
+     assert unit.override is False
+
+     expected = pd.DataFrame(
+         {
+             "X": [-10.0, -10.0, 10.0, 10.0],
+             "Y": [5.0, 0.0, 0.0, 5.0],
+             "Mannings n": [0.035, 0.035, 0.035, 0.035],
+             "Panel": ["*", "", "", "*"],
+             "Embankments": ["LEFT", "", "", "RIGHT"],
+         },
+     )
+     pd.testing.assert_frame_equal(unit.section_data_list[0], expected)
+
+     expected = pd.DataFrame({"X": [], "Y": [], "Mannings n": [], "Panel": [], "Embankments": []})
+     pd.testing.assert_frame_equal(unit.section_data_list[1], expected, check_dtype=False)
+     pd.testing.assert_frame_equal(unit.section_data_list[2], expected, check_dtype=False)
+     pd.testing.assert_frame_equal(unit.section_data_list[3], expected, check_dtype=False)
+
+     expected = pd.DataFrame({"X": [-7.5, -5.0, -2.5], "Z": [0.0, 5.0, 0.0]})
+     pd.testing.assert_frame_equal(unit.opening_data_list[0], expected)
+
+     expected = pd.DataFrame({"X": [2.5, 5.0, 7.5], "Z": [0.0, 5.0, 0.0]})
+     pd.testing.assert_frame_equal(unit.opening_data_list[1], expected)
+
+     expected = pd.DataFrame(
+         {
+             "Invert": [],
+             "Soffit": [],
+             "Section Area": [],
+             "Cd Part Full": [],
+             "Cd Full": [],
+             "Drowning Coefficient": [],
+             "X": [],
+         },
+     )
+     pd.testing.assert_frame_equal(unit.culvert_data, expected, check_dtype=False)
+
+     expected = pd.DataFrame(
+         {
+             "X": [-10.0, 0.0, 10.0],
+             "Y": [7.0, 9.0, 7.0],
+             "Easting": [0.0, 0.0, 0.0],
+             "Northing": [0.0, 0.0, 0.0],
+         },
+     )
+     pd.testing.assert_frame_equal(unit.spill_data, expected)
+
+     expected = pd.DataFrame({"percentage": [], "time": [], "datetime": []})
+     pd.testing.assert_frame_equal(unit.block_data, expected, check_dtype=False)
+
+
+ def test_write_bridge(folder: Path):
+     for file in folder.glob("*.ied"):
+         unit = create_bridge(folder / file)
+         output = unit._write()
+
+         new_unit = BRIDGE(output)
+         new_output = new_unit._write()
+         assert unit == new_unit, f"unit objects not equal for {file=}"
+         assert output == new_output, f"unit outputs not equal for {file=}"
+         for line in output:
+             assert isinstance(line, str), f"{line=} is not a string"
+
+
+ def test_valid_change(folder: Path):
+     unit = create_bridge(folder / "US_vSP_NoBl_2O_Para.ied")
+
+     assert unit.calibration_coefficient == 1
+     unit.calibration_coefficient = 10
+     assert unit.calibration_coefficient == 10
+
+     output = unit._write()
+     new_unit = BRIDGE(output)
+     assert new_unit.calibration_coefficient == 10
+
+
+ def test_invalid_change(folder: Path):
+     unit = create_bridge(folder / "US_vSP_NoBl_2O_Para.ied")
+     unit.calibration_coefficient = "hi"  # type: ignore
+     # ignoring typing as this mistake is on purpose
+     msg = (
+         "One or more parameters in <floodmodeller_api Unit Class:"
+         " BRIDGE(name=Label11, type=INTEGRATED)> are invalid:"
+         "\n calibration_coefficient -> Expected: (<class 'float'>, <class 'int'>)"
+     )
+     with pytest.raises(ValueError, match=re.escape(msg)):
+         unit._write()
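The new test module above exercises the integrated bridge support added in this release. As a hedged sketch (not part of the diff), the same unit can be inspected from the bundled `integrated_bridge.dat` test model; the path mirrors the test data in this wheel, and the `structures` mapping is the usual `DAT` accessor, but treat the subtype check as an assumption inferred from the assertions above.

```python
# Sketch only: reading the new integrated BRIDGE unit from the test model.
from floodmodeller_api import DAT

dat = DAT("floodmodeller_api/test/test_data/integrated_bridge.dat")  # path assumed
for name, unit in dat.structures.items():
    if unit._unit == "BRIDGE" and unit.subtype == "INTEGRATED":
        # attributes asserted in test_read_bridge above
        print(name, unit.integrated_subtype, unit.opening_nrows, unit.spill_nrows)
```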
floodmodeller_api/test/test_json.py CHANGED
@@ -1,5 +1,8 @@
+ from __future__ import annotations
+
  import json
  from pathlib import Path
+ from typing import TYPE_CHECKING
 
  import pytest
 
@@ -7,6 +10,9 @@ from floodmodeller_api import DAT, IED, IEF, INP, XML2D
  from floodmodeller_api.to_from_json import is_jsonable
  from floodmodeller_api.util import read_file
 
+ if TYPE_CHECKING:
+     from floodmodeller_api._base import FMFile
+
 
  def create_expected_json_files():
      """Helper function to recreate all the expected JSON files if needed at any point due to updates
@@ -48,7 +54,7 @@ def test_dat_json(dat_obj):
 
 
  @pytest.fixture()
- def parameterised_objs_and_expected(test_workspace):
+ def parameterised_objs_and_expected(test_workspace) -> list[tuple[FMFile, Path]]:
      """JSON: expected after passing to_json method"""
      return [
          (DAT(test_workspace / "EX18.DAT"), test_workspace / "EX18_DAT_expected.json"),
@@ -62,7 +68,7 @@ def parameterised_objs_and_expected(test_workspace):
      ]
 
 
- def test_to_json_matches_expected(parameterised_objs_and_expected):
+ def test_to_json_matches_expected(parameterised_objs_and_expected: list[tuple[FMFile, Path]]):
      """JSON: To test if the json object produced in to_json is identical to the expected json file"""
      for obj, json_expected in parameterised_objs_and_expected:
          # First, to create and handle the json (str) object
@@ -79,7 +85,7 @@ def test_to_json_matches_expected(parameterised_objs_and_expected):
              json_dict_from_obj.pop(key, None)
              json_dict_from_file.pop(key, None)
 
-         assert json_dict_from_obj == json_dict_from_file
+         assert json_dict_from_obj == json_dict_from_file, f"object not equal for {obj.filepath!s}"
 
 
  @pytest.mark.parametrize(
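The `TYPE_CHECKING` guard added here is the standard way to make `FMFile` available for annotations without paying for (or risking a cycle from) a runtime import. A minimal sketch of the same pattern, using only names that appear in this diff:

```python
from __future__ import annotations  # annotations stay strings at runtime

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # seen only by static type checkers, never imported at runtime
    from floodmodeller_api._base import FMFile


def filepaths(objs: list[FMFile]) -> list[str]:
    # FMFile subclasses (DAT, IED, IEF, ...) all carry a .filepath
    return [str(obj.filepath) for obj in objs]
```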
floodmodeller_api/test/test_logs_lf.py CHANGED
@@ -1,3 +1,4 @@
+ import logging
  from pathlib import Path
  from unittest.mock import MagicMock, patch
 
@@ -10,11 +11,11 @@ from floodmodeller_api.logs import create_lf
 
 
  @pytest.fixture()
- def lf1_fp(test_workspace):
+ def lf1_fp(test_workspace: Path) -> Path:
      return Path(test_workspace, "ex3.lf1")
 
 
- def test_lf1_info_dict(lf1_fp):
+ def test_lf1_info_dict(lf1_fp: Path):
      """LF1: Check info dictionary"""
      lf1 = LF1(lf1_fp)
      assert lf1.info["version"] == "5.0.0.7752"
@@ -23,23 +24,40 @@ def test_lf1_info_dict(lf1_fp):
      assert lf1.info["progress"] == 100
 
 
- def test_lf1_report_progress(lf1_fp):
+ def test_lf1_report_progress(lf1_fp: Path):
      """LF1: Check report_progress()"""
      lf1 = LF1(lf1_fp)
      assert lf1.report_progress() == 100
 
 
- def test_lf1_to_dataframe(lf1_fp):
+ def test_lf1_to_dataframe(lf1_fp: Path):
      """LF1: Check to_dataframe()"""
      lf1 = LF1(lf1_fp)
-     lf1_df = lf1.to_dataframe()
+     lf1_df = lf1.to_dataframe(variable="all")
+
      assert lf1_df.loc[lf1_df.index[0], "iter"] == 6
+     assert lf1.to_dataframe(variable="iter").iloc[0] == 6
+
      assert lf1_df.loc[lf1_df.index[-1], "outflow"] == 21.06
+     assert lf1.to_dataframe(variable="outflow").iloc[-1] == 21.06
+
      assert lf1_df.loc[lf1_df.index[4], "mass_error"] == -0.07
+     assert lf1.to_dataframe(variable="mass_error").iloc[4] == -0.07
+
+     lf1_tuflow_df = lf1.to_dataframe(variable="all", include_tuflow=True)
+     non_tuflow_columns = [col for col in lf1_tuflow_df.columns if "tuflow" not in col]
+     pd.testing.assert_frame_equal(lf1_tuflow_df[non_tuflow_columns], lf1_df)
+
+     tuflow_columns = [col for col in lf1_tuflow_df.columns if "tuflow" in col]
+     expected_tuflow_columns = ["tuflow_vol", "tuflow_n_wet", "tuflow_dt"]
+     assert set(tuflow_columns) == set(expected_tuflow_columns)
+
+     for col in tuflow_columns:
+         assert lf1_tuflow_df[col].isna().all()  # there is no tuflow in this lf1
 
 
- def test_lf1_from_ief(lf1_fp, test_workspace):
-     """LF1: Check IEF.get_lf1()"""
+ def test_lf1_from_ief(lf1_fp: Path, test_workspace: Path):
+     """LF1: Check IEF.get_log()"""
      lf1 = LF1(lf1_fp)
 
      ief_fp = Path(test_workspace, "ex3.ief")
@@ -51,41 +69,44 @@ def test_lf1_from_ief(lf1_fp, test_workspace):
      pd.testing.assert_frame_equal(lf1.to_dataframe(), lf1_from_ief.to_dataframe())
 
 
- def test_log_file_unsupported(capsys):
-     lf = create_lf(None, "lf3")
+ def test_log_file_unsupported(caplog):
+     with caplog.at_level(logging.WARNING):
+         lf = create_lf(None, "lf3")
 
      assert lf is None
      assert (
-         capsys.readouterr().out
-         == "No progress bar as log file must have suffix lf1 or lf2. Simulation will continue as usual.\n"
+         caplog.text
+         == "WARNING root:lf.py:325 No progress bar as log file must have suffix lf1 or lf2. Simulation will continue as usual.\n"
      )
 
 
  @pytest.mark.usefixtures("log_timeout")
- def test_log_file_timeout(capsys):
-     lf_filepath = MagicMock()
-     lf_filepath.is_file.return_value = False
-     lf = create_lf(lf_filepath, "lf1")
+ def test_log_file_timeout(caplog):
+     with caplog.at_level(logging.WARNING):
+         lf_filepath = MagicMock()
+         lf_filepath.is_file.return_value = False
+         lf = create_lf(lf_filepath, "lf1")
 
      assert lf is None
      assert (
-         capsys.readouterr().out
-         == "No progress bar as log file is expected but not detected. Simulation will continue as usual.\n"
+         caplog.text
+         == "WARNING root:lf.py:325 No progress bar as log file is expected but not detected. Simulation will continue as usual.\n"
      )
 
 
  @pytest.mark.usefixtures("log_timeout")
  @freeze_time("1970-01-01 00:00:00", tick=True)
- def test_log_file_from_old_run(capsys):
-     lf_filepath = MagicMock()
-     lf_filepath.is_file.return_value = True
-     lf_filepath.stat.return_value.st_mtime = -10
-     lf = create_lf(lf_filepath, "lf1")
+ def test_log_file_from_old_run(caplog):
+     with caplog.at_level(logging.WARNING):
+         lf_filepath = MagicMock()
+         lf_filepath.is_file.return_value = True
+         lf_filepath.stat.return_value.st_mtime = -10
+         lf = create_lf(lf_filepath, "lf1")
 
      assert lf is None
      assert (
-         capsys.readouterr().out
-         == "No progress bar as log file is from previous run. Simulation will continue as usual.\n"
+         caplog.text
+         == "WARNING root:lf.py:325 No progress bar as log file is from previous run. Simulation will continue as usual.\n"
      )
 
 
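A hedged usage sketch of the extended `to_dataframe` signature tested above. "ex3.lf1" is the sample log from the test workspace; the top-level `LF1` import follows the package's documented entry point, and the column names come directly from the assertions in the diff.

```python
import logging

from floodmodeller_api import LF1

logging.basicConfig(level=logging.WARNING)  # log-file warnings now go via logging

lf1 = LF1("ex3.lf1")
print(lf1.info["progress"])  # 100 for a completed run

df = lf1.to_dataframe(variable="all", include_tuflow=True)
# TUFLOW columns are present but all-NaN when the run had no TUFLOW component
print(df[["tuflow_vol", "tuflow_n_wet", "tuflow_dt"]].isna().all())
```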
floodmodeller_api/test/test_river.py CHANGED
@@ -120,7 +120,7 @@ def test_create_from_blank():
      assert len(blank_unit.data) == 0
      assert len(blank_unit.active_data) == 0
      assert blank_unit._write() == [
-         "RIVER ",
+         "RIVER",
          "SECTION",
          "new_section ",
          " 0.000 0.000100 1000.000",
108
108
 
109
109
  @pytest.fixture()
110
110
  def ex18_dat_path(test_workspace):
111
- # TODO: Source a better test case that can be opened to public repo.
112
111
  return Path(test_workspace, "EX18.DAT")
113
112
 
114
113
 
floodmodeller_api/test/test_xml2d.py CHANGED
@@ -3,6 +3,7 @@ from pathlib import Path
  import pytest
 
  from floodmodeller_api import XML2D
+ from floodmodeller_api.util import FloodModellerAPIError
 
 
  @pytest.fixture()
@@ -130,15 +131,14 @@ def test_xml2d_reorder_elem_computational_area_wrong_position():
      x2d.domains[domain]["computational_area"]["ncols"] = 12
      x2d.domains[domain]["computational_area"]["nrows"] = 42
      x2d.domains[domain]["computational_area"]["rotation"] = 3.14159
-
      x2d.domains[domain]["run_data"]["upwind"] = "upwind value"
      x2d.domains[domain]["run_data"]["wall"] = "Humpty Dumpty"
-
-     # TODO: Add check that this should fail validation if in the wrong order
-     # # how do I check that something fails?
-
      assert x2d._write()
 
+     x2d.domains[domain]["run_data"]["upwind123"] = "upwind value"
+     with pytest.raises(FloodModellerAPIError):
+         assert x2d._write()
+
 
  def test_xml2d_update_value(xml_fp, data_before):
      """XML2D: Test changing and reverting link1d file and dtm makes no changes"""
floodmodeller_api/to_from_json.py CHANGED
@@ -1,6 +1,6 @@
  """
  Flood Modeller Python API
- Copyright (C) 2024 Jacobs U.K. Limited
+ Copyright (C) 2025 Jacobs U.K. Limited
 
  This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
  as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
floodmodeller_api/tool.py CHANGED
@@ -1,6 +1,7 @@
  from __future__ import annotations
 
  import argparse
+ import logging
  import sys
  import tkinter as tk
  from dataclasses import dataclass
@@ -156,9 +157,6 @@ class Gui:
          entry.pack()
          self.root_entries[name] = entry
 
-         # TODO: Add a progress bar if appropriate
-         # TODO: Present some useful information: either tool outputs or logs
-
      def run_gui_callback(self):
          """
          Method to run the gui callback function.
@@ -308,9 +306,9 @@ class FMTool:
              value = getattr(args, input_param.name)
              input_kwargs[input_param.name] = input_param.dtype(value)
 
-         print(f"Running {self.name}")
+         logging.info("Running %s", self.name)
          self.run(**input_kwargs)
-         print("Completed")
+         logging.info("Completed")
          # Return nothing
 
      def generate_gui(self):
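Because `FMTool` now reports via `logging` instead of `print`, a script driving a tool only sees the "Running ..." and "Completed" messages if a handler is configured. A minimal sketch; the `MyTool` subclass and the entry-point call are hypothetical placeholders, only the logging setup is the point.

```python
import logging

from floodmodeller_api.tool import FMTool  # base class changed in this diff

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")

# class MyTool(FMTool): ...        # hypothetical tool definition
# MyTool().run(...)                # "Running <name>" / "Completed" now emit at INFO
```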
floodmodeller_api/toolbox/model_build/add_siltation_definition.py CHANGED
@@ -1,4 +1,4 @@
- """ This function allows you to raise the minimum bed level 300mm across all sections in a DAT file (i.e siltation) """
+ """This function allows you to raise the minimum bed level 300mm across all sections in a DAT file (i.e siltation)"""
 
  from __future__ import annotations
 
floodmodeller_api/toolbox/model_build/structure_log/structure_log.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
  import copy
  import csv
+ import logging
  from typing import TYPE_CHECKING
 
  from floodmodeller_api import DAT
@@ -224,7 +225,6 @@ class StructureLogBuilder:
 
          return {"dimensions": dimensions}
 
-     # TODO: a refactor to combine the _add_conduits and _add_structures together would be nice for clarity
      def add_conduits(self):
          conduit_stack = copy.deepcopy(list(self.dat.conduits.values()))
 
@@ -245,8 +245,9 @@ class StructureLogBuilder:
                  ("CONDUIT", "SPRUNG"),
                  ("REPLICATE", None),
              ]:
-                 print(
-                     f'Conduit sub-type "{conduit.subtype}" is currently unsupported in structure log',
+                 logging.warning(
+                     "Conduit subtype: %s not currently supported in structure log",
+                     conduit.subtype,
                  )
                  continue
              conduit_dict, add_to_conduit_stack = self._conduit_data(conduit)
@@ -339,7 +340,7 @@ class StructureLogBuilder:
 
          culvert_data = []
          if hasattr(structure, "culvert_data") and structure.culvert_data.shape[0] > 1:
-             for _, row in structure.culvert_data:
+             for _, row in structure.culvert_data.iterrows():
                  culvert = {
                      "invert": row["Invert"],
                      "soffit": row["Soffit"],
@@ -357,7 +358,6 @@ class StructureLogBuilder:
          }
 
      def _sluice_data(self, structure: SLUICE) -> dict:
-         # TODO: these could do with more attention, given more time
          dimensions = extract_attrs(structure, {"crest_elevation", "weir_breadth", "weir_length"})
 
          return {"dimensions": dimensions}
@@ -395,7 +395,10 @@ class StructureLogBuilder:
              elif structure._unit == "BRIDGE":
                  self.unit_store[(structure.name, structure._unit)] |= self._bridge_data(structure)
              else:
-                 print(f'Structure "{structure._unit}" is currently unsupported in structure log')
+                 logging.warning(
+                     "Structure: %s not currently supported in structure log",
+                     structure._unit,
+                 )
                  continue
 
      def _format_friction(self, unit_dict):
@@ -422,7 +425,6 @@ class StructureLogBuilder:
          return text
 
      def _format_bridge_dimensions(self, unit_dict):
-
          if len(unit_dict["opening_data"]) == 1:
              opening = unit_dict["opening_data"][0]
              height = opening["opening_height"]
@@ -545,7 +547,7 @@ class StructureLogBuilder:
 
          culvert_loss = ""
 
-         match unit_type:
+         match unit_type:
              case "BRIDGE":
                  dimensions = self._format_bridge_dimensions(unit_dict)
              case "ORIFICE":
floodmodeller_api/units/__init__.py CHANGED
@@ -1,6 +1,10 @@
+ from typing import TypeAlias
+
  from .boundaries import HTBDY, QHBDY, QTBDY, REFHBDY
  from .comment import COMMENT
  from .conduits import CONDUIT
+ from .connectors import JUNCTION, LATERAL
+ from .controls import RESERVOIR
  from .iic import IIC
  from .losses import BLOCKAGE, CULVERT
  from .sections import INTERPOLATE, REPLICATE, RIVER
@@ -18,3 +22,14 @@ from .structures import (
  from .units import ALL_UNIT_TYPES, SUPPORTED_UNIT_TYPES, UNSUPPORTED_UNIT_TYPES
  from .unsupported import UNSUPPORTED
  from .variables import Variables
+
+ TBoundaries: TypeAlias = HTBDY | QHBDY | QTBDY | REFHBDY
+ TSections: TypeAlias = INTERPOLATE | REPLICATE | RIVER
+ TConduits: TypeAlias = CONDUIT
+ TConnectors: TypeAlias = JUNCTION | LATERAL
+ TControls: TypeAlias = RESERVOIR
+ TLosses: TypeAlias = BLOCKAGE | CULVERT
+ TStructures: TypeAlias = (
+     BRIDGE | CRUMP | FLAT_V_WEIR | ORIFICE | OUTFALL | RNWEIR | SLUICE | SPILL | WEIR
+ )
+ TUnsupported: TypeAlias = UNSUPPORTED
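A sketch of how the new aliases tighten downstream signatures; `TStructures` comes straight from the block above, while the helper function itself is illustrative only.

```python
from __future__ import annotations

from floodmodeller_api.units import TStructures


def describe(unit: TStructures) -> str:
    # accepts any of BRIDGE, CRUMP, FLAT_V_WEIR, ORIFICE, OUTFALL,
    # RNWEIR, SLUICE, SPILL, WEIR
    return f"{unit._unit}: {unit.name}"
```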
floodmodeller_api/units/_base.py CHANGED
@@ -1,6 +1,6 @@
  """
  Flood Modeller Python API
- Copyright (C) 2024 Jacobs U.K. Limited
+ Copyright (C) 2025 Jacobs U.K. Limited
 
  This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
  as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
@@ -18,11 +18,15 @@ from __future__ import annotations
 
  """ Holds the base unit class for all FM Units """
 
+ import logging
+ from itertools import chain
+ from typing import Any
+
  import pandas as pd
 
  from ..diff import check_item_with_dataframe_equal
  from ..to_from_json import Jsonable
- from .helpers import _to_float, _to_str, join_10_char, join_n_char_ljust, split_10_char
+ from ._helpers import join_10_char, join_n_char_ljust, split_10_char, to_float, to_int, to_str
 
 
  class Unit(Jsonable):
@@ -59,6 +63,36 @@ class Unit(Jsonable):
              msg = f'Failed to set unit name to "{new_name}" due to error: {e}'
              raise Exception(msg) from e
 
+     @property
+     def all_labels(self) -> set[str]:
+         """All explicit labels associated with a unit."""
+         label_attrs = [
+             "name",
+             "spill",
+             "spill1",
+             "spill2",
+             "first_spill",
+             "second_spill",
+             "lat1",
+             "lat2",
+             "lat3",
+             "lat4",
+             "ds_label",
+         ]
+         label_list_attrs = ["labels", "lateral_inflow_labels"]
+
+         labels = {getattr(self, x) for x in label_attrs if hasattr(self, x)}
+         label_lists = [getattr(self, x) for x in label_list_attrs if hasattr(self, x)]
+
+         return (labels | set(chain(*label_lists))) - {""}
+
+     @property
+     def unique_name(self) -> str:
+         if self._name is None:
+             msg = "No unique name available."
+             raise ValueError(msg)
+         return f"{self._unit}_{self._name}"
+
      @property
      def subtype(self) -> str | None:
          return self._subtype
@@ -91,14 +125,17 @@ class Unit(Jsonable):
      def _diff(self, other):
          diff = self._get_diff(other)
          if diff[0]:
-             print("No difference, units are equivalent")
+             logging.info("No difference, units are equivalent")
          else:
-             print("\n".join([f"{name}: {reason}" for name, reason in diff[1]]))
+             logging.info("\n".join([f"{name}: {reason}" for name, reason in diff[1]]))
 
      def _get_diff(self, other):
          return self.__eq__(other, return_diff=True)  # pylint: disable=unnecessary-dunder-call
 
      def __eq__(self, other, return_diff=False):
+         if not isinstance(other, Unit):
+             return NotImplemented if not return_diff else (False, ["Type mismatch"])
+
          result = True
          diff = []
          result, diff = check_item_with_dataframe_equal(
@@ -114,16 +151,16 @@ class Unit(Jsonable):
      def _read_rules(self, block):
          rule_params = split_10_char(block[self._last_gate_row + 1])
          self.nrules = int(rule_params[0])
-         self.rule_sample_time = _to_float(rule_params[1])
-         self.timeunit = _to_str(rule_params[2], "SECONDS", check_float=False)
-         self.extendmethod = _to_str(rule_params[3], "EXTEND")
+         self.rule_sample_time = to_float(rule_params[1])
+         self.timeunit = to_str(rule_params[2], "SECONDS", check_float=False)
+         self.extendmethod = to_str(rule_params[3], "EXTEND")
          self.rules = self._get_logical_rules(self.nrules, block, self._last_gate_row + 2)
          # Get time rule data set
          nrows = int(split_10_char(block[self._last_rule_row + 1])[0])
          data_list = []
          for row in block[self._last_rule_row + 2 : self._last_rule_row + 2 + nrows]:
              row_split = split_10_char(f"{row:<20}")
-             x = _to_float(row_split[0])  # time
+             x = to_float(row_split[0])  # time
              y = row[10:].strip()  # operating rules
              data_list.append([x, y])
          self._last_time_row = self._last_rule_row + nrows + 1
@@ -139,14 +176,14 @@ class Unit(Jsonable):
          self.has_varrules = True
          varrule_params = split_10_char(block[self._last_time_row + 2])
          self.nvarrules = int(varrule_params[0])
-         self.varrule_sample_time = _to_float(rule_params[1])
+         self.varrule_sample_time = to_float(rule_params[1])
          self.varrules = self._get_logical_rules(self.nvarrules, block, self._last_time_row + 3)
          # Get time rule data set
          var_nrows = int(split_10_char(block[self._last_rule_row + 1])[0])
          data_list = []
          for row in block[self._last_rule_row + 2 : self._last_rule_row + 2 + var_nrows]:
              row_split = split_10_char(f"{row:<20}")
-             x = _to_float(row_split[0])  # time
+             x = to_float(row_split[0])  # time
              y = row[10:].strip()  # operating rules
              data_list.append([x, y])
 
@@ -216,3 +253,29 @@ class Unit(Jsonable):
          self._last_rule_row = rule_row
 
          return rules
+
+     def _remove_unit_name(self, line: str, *, remove_revision: bool = False) -> str:
+         line = line.replace(self._unit, "", 1)
+         if remove_revision:
+             line = line.replace("#revision#", "", 1)
+         return line.strip()
+
+     def _create_header(self, *, include_revision: bool = False) -> str:
+         header = self._unit
+         if include_revision and hasattr(self, "_revision"):
+             header += f" #revision#{self._revision}"
+         if hasattr(self, "comment") and self.comment != "":
+             header += f" {self.comment}"
+         return header
+
+     def _get_first_word(self, line: str) -> str:
+         return line.split(" ")[0].strip()
+
+     def _get_revision_and_comment(self, line: str) -> tuple[int | None, str]:
+         line_without_name = self._remove_unit_name(line, remove_revision=True)
+         revision = to_int(line_without_name[0], None) if line_without_name != "" else None
+         comment = line_without_name[1:].strip()
+         return revision, comment
+
+     def _enforce_dataframe(self, data: Any, columns: tuple[str, ...]) -> pd.DataFrame:
+         return data if isinstance(data, pd.DataFrame) else pd.DataFrame([], columns=columns)
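A sketch of the two new convenience properties on `Unit`, using a blank RIVER section as in `test_create_from_blank` earlier in the diff; the exact contents of `all_labels` here are indicative rather than asserted by the source.

```python
from floodmodeller_api.units import RIVER

section = RIVER()           # blank section; written out with the name "new_section"
print(section.unique_name)  # "<unit type>_<name>", e.g. "RIVER_new_section"
print(section.all_labels)   # every explicit label on the unit, e.g. {"new_section"}
```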