floodmodeller-api 0.5.0.post1__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff shows the content changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (119)
  1. floodmodeller_api/__init__.py +11 -1
  2. floodmodeller_api/_base.py +55 -36
  3. floodmodeller_api/backup.py +15 -12
  4. floodmodeller_api/dat.py +191 -121
  5. floodmodeller_api/diff.py +4 -4
  6. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +15 -14
  7. floodmodeller_api/ied.py +8 -10
  8. floodmodeller_api/ief.py +56 -42
  9. floodmodeller_api/ief_flags.py +1 -1
  10. floodmodeller_api/inp.py +7 -10
  11. floodmodeller_api/logs/lf.py +25 -26
  12. floodmodeller_api/logs/lf_helpers.py +20 -20
  13. floodmodeller_api/logs/lf_params.py +1 -5
  14. floodmodeller_api/mapping.py +11 -2
  15. floodmodeller_api/test/__init__.py +2 -2
  16. floodmodeller_api/test/conftest.py +2 -3
  17. floodmodeller_api/test/test_backup.py +2 -2
  18. floodmodeller_api/test/test_conveyance.py +13 -7
  19. floodmodeller_api/test/test_dat.py +168 -20
  20. floodmodeller_api/test/test_data/EX18_DAT_expected.json +164 -144
  21. floodmodeller_api/test/test_data/EX3_DAT_expected.json +6 -2
  22. floodmodeller_api/test/test_data/EX6_DAT_expected.json +12 -46
  23. floodmodeller_api/test/test_data/encoding_test_cp1252.dat +1081 -0
  24. floodmodeller_api/test/test_data/encoding_test_utf8.dat +1081 -0
  25. floodmodeller_api/test/test_data/integrated_bridge/AR_NoSP_NoBl_2O_NO_OneFRC.ied +33 -0
  26. floodmodeller_api/test/test_data/integrated_bridge/AR_vSP_25pc_1O.ied +32 -0
  27. floodmodeller_api/test/test_data/integrated_bridge/PL_vSP_25pc_1O.ied +34 -0
  28. floodmodeller_api/test/test_data/integrated_bridge/SBTwoFRCsStaggered.IED +32 -0
  29. floodmodeller_api/test/test_data/integrated_bridge/US_NoSP_NoBl_OR_RN.ied +28 -0
  30. floodmodeller_api/test/test_data/integrated_bridge/US_SP_NoBl_OR_frc_PT2-5_RN.ied +34 -0
  31. floodmodeller_api/test/test_data/integrated_bridge/US_fSP_NoBl_1O.ied +30 -0
  32. floodmodeller_api/test/test_data/integrated_bridge/US_nSP_NoBl_1O.ied +49 -0
  33. floodmodeller_api/test/test_data/integrated_bridge/US_vSP_NoBl_2O_Para.ied +35 -0
  34. floodmodeller_api/test/test_data/integrated_bridge.dat +40 -0
  35. floodmodeller_api/test/test_data/network.ied +2 -2
  36. floodmodeller_api/test/test_data/network_dat_expected.json +141 -243
  37. floodmodeller_api/test/test_data/network_ied_expected.json +2 -2
  38. floodmodeller_api/test/test_data/network_with_comments.ied +2 -2
  39. floodmodeller_api/test/test_data/structure_logs/EX17_expected.csv +4 -0
  40. floodmodeller_api/test/test_data/structure_logs/EX17_expected.json +69 -0
  41. floodmodeller_api/test/test_data/structure_logs/EX18_expected.csv +20 -0
  42. floodmodeller_api/test/test_data/structure_logs/EX18_expected.json +292 -0
  43. floodmodeller_api/test/test_data/structure_logs/EX6_expected.csv +4 -0
  44. floodmodeller_api/test/test_data/structure_logs/EX6_expected.json +35 -0
  45. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_flow.csv +182 -0
  46. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_fr.csv +182 -0
  47. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_mode.csv +182 -0
  48. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_stage.csv +182 -0
  49. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_state.csv +182 -0
  50. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_velocity.csv +182 -0
  51. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_h.csv +182 -0
  52. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_mode.csv +182 -0
  53. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_link_inflow.csv +182 -0
  54. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_max.csv +87 -0
  55. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_h.csv +182 -0
  56. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_mode.csv +182 -0
  57. floodmodeller_api/test/test_flowtimeprofile.py +2 -2
  58. floodmodeller_api/test/test_hydrology_plus_export.py +4 -2
  59. floodmodeller_api/test/test_ied.py +3 -3
  60. floodmodeller_api/test/test_ief.py +12 -4
  61. floodmodeller_api/test/test_inp.py +2 -2
  62. floodmodeller_api/test/test_integrated_bridge.py +159 -0
  63. floodmodeller_api/test/test_json.py +14 -13
  64. floodmodeller_api/test/test_logs_lf.py +50 -29
  65. floodmodeller_api/test/test_read_file.py +1 -0
  66. floodmodeller_api/test/test_river.py +12 -12
  67. floodmodeller_api/test/test_tool.py +8 -5
  68. floodmodeller_api/test/test_toolbox_structure_log.py +148 -158
  69. floodmodeller_api/test/test_xml2d.py +14 -16
  70. floodmodeller_api/test/test_zz.py +143 -0
  71. floodmodeller_api/to_from_json.py +9 -9
  72. floodmodeller_api/tool.py +15 -11
  73. floodmodeller_api/toolbox/example_tool.py +5 -1
  74. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +13 -9
  75. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +500 -194
  76. floodmodeller_api/toolbox/model_build/structure_log_definition.py +5 -1
  77. floodmodeller_api/units/__init__.py +15 -0
  78. floodmodeller_api/units/_base.py +87 -20
  79. floodmodeller_api/units/_helpers.py +343 -0
  80. floodmodeller_api/units/boundaries.py +59 -71
  81. floodmodeller_api/units/comment.py +1 -1
  82. floodmodeller_api/units/conduits.py +57 -54
  83. floodmodeller_api/units/connectors.py +112 -0
  84. floodmodeller_api/units/controls.py +107 -0
  85. floodmodeller_api/units/conveyance.py +1 -1
  86. floodmodeller_api/units/iic.py +2 -9
  87. floodmodeller_api/units/losses.py +44 -45
  88. floodmodeller_api/units/sections.py +52 -51
  89. floodmodeller_api/units/structures.py +361 -531
  90. floodmodeller_api/units/units.py +27 -26
  91. floodmodeller_api/units/unsupported.py +5 -7
  92. floodmodeller_api/units/variables.py +2 -2
  93. floodmodeller_api/urban1d/_base.py +13 -17
  94. floodmodeller_api/urban1d/conduits.py +11 -21
  95. floodmodeller_api/urban1d/general_parameters.py +1 -1
  96. floodmodeller_api/urban1d/junctions.py +7 -11
  97. floodmodeller_api/urban1d/losses.py +13 -17
  98. floodmodeller_api/urban1d/outfalls.py +18 -22
  99. floodmodeller_api/urban1d/raingauges.py +5 -10
  100. floodmodeller_api/urban1d/subsections.py +5 -4
  101. floodmodeller_api/urban1d/xsections.py +14 -17
  102. floodmodeller_api/util.py +23 -6
  103. floodmodeller_api/validation/parameters.py +7 -3
  104. floodmodeller_api/validation/urban_parameters.py +1 -4
  105. floodmodeller_api/validation/validation.py +11 -5
  106. floodmodeller_api/version.py +1 -1
  107. floodmodeller_api/xml2d.py +27 -31
  108. floodmodeller_api/xml2d_template.py +1 -1
  109. floodmodeller_api/zz.py +539 -0
  110. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/LICENSE.txt +1 -1
  111. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/METADATA +30 -16
  112. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/RECORD +116 -83
  113. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/WHEEL +1 -1
  114. floodmodeller_api/test/test_zzn.py +0 -36
  115. floodmodeller_api/units/helpers.py +0 -123
  116. floodmodeller_api/zzn.py +0 -414
  117. /floodmodeller_api/test/test_data/{network_from_tabularCSV.csv → tabular_csv_outputs/network_zzn_max.csv} +0 -0
  118. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/entry_points.txt +0 -0
  119. {floodmodeller_api-0.5.0.post1.dist-info → floodmodeller_api-0.5.2.dist-info}/top_level.txt +0 -0
floodmodeller_api/test/test_logs_lf.py

@@ -1,3 +1,4 @@
+ import logging
  from pathlib import Path
  from unittest.mock import MagicMock, patch

@@ -9,12 +10,12 @@ from floodmodeller_api import IEF, LF1
  from floodmodeller_api.logs import create_lf


- @pytest.fixture
- def lf1_fp(test_workspace):
+ @pytest.fixture()
+ def lf1_fp(test_workspace: Path) -> Path:
  return Path(test_workspace, "ex3.lf1")


- def test_lf1_info_dict(lf1_fp):
+ def test_lf1_info_dict(lf1_fp: Path):
  """LF1: Check info dictionary"""
  lf1 = LF1(lf1_fp)
  assert lf1.info["version"] == "5.0.0.7752"
@@ -23,69 +24,89 @@ def test_lf1_info_dict(lf1_fp):
  assert lf1.info["progress"] == 100


- def test_lf1_report_progress(lf1_fp):
+ def test_lf1_report_progress(lf1_fp: Path):
  """LF1: Check report_progress()"""
  lf1 = LF1(lf1_fp)
  assert lf1.report_progress() == 100


- def test_lf1_to_dataframe(lf1_fp):
+ def test_lf1_to_dataframe(lf1_fp: Path):
  """LF1: Check to_dataframe()"""
  lf1 = LF1(lf1_fp)
- df = lf1.to_dataframe()
- assert df.loc[df.index[0], "iter"] == 6
- assert df.loc[df.index[-1], "outflow"] == 21.06
- assert df.loc[df.index[4], "mass_error"] == -0.07
+ lf1_df = lf1.to_dataframe(variable="all")

+ assert lf1_df.loc[lf1_df.index[0], "iter"] == 6
+ assert lf1.to_dataframe(variable="iter").iloc[0] == 6

- def test_lf1_from_ief(lf1_fp, test_workspace):
- """LF1: Check IEF.get_lf1()"""
+ assert lf1_df.loc[lf1_df.index[-1], "outflow"] == 21.06
+ assert lf1.to_dataframe(variable="outflow").iloc[-1] == 21.06
+
+ assert lf1_df.loc[lf1_df.index[4], "mass_error"] == -0.07
+ assert lf1.to_dataframe(variable="mass_error").iloc[4] == -0.07
+
+ lf1_tuflow_df = lf1.to_dataframe(variable="all", include_tuflow=True)
+ non_tuflow_columns = [col for col in lf1_tuflow_df.columns if "tuflow" not in col]
+ pd.testing.assert_frame_equal(lf1_tuflow_df[non_tuflow_columns], lf1_df)
+
+ tuflow_columns = [col for col in lf1_tuflow_df.columns if "tuflow" in col]
+ expected_tuflow_columns = ["tuflow_vol", "tuflow_n_wet", "tuflow_dt"]
+ assert set(tuflow_columns) == set(expected_tuflow_columns)
+
+ for col in tuflow_columns:
+ assert lf1_tuflow_df[col].isna().all()  # there is no tuflow in this lf1
+
+
+ def test_lf1_from_ief(lf1_fp: Path, test_workspace: Path):
+ """LF1: Check IEF.get_log()"""
  lf1 = LF1(lf1_fp)

  ief_fp = Path(test_workspace, "ex3.ief")
  ief = IEF(ief_fp)
  lf1_from_ief = ief.get_log()

- assert lf1._filepath == lf1_from_ief._filepath
+ assert lf1.filepath == lf1_from_ief.filepath
  assert lf1.info == lf1_from_ief.info
  pd.testing.assert_frame_equal(lf1.to_dataframe(), lf1_from_ief.to_dataframe())


- def test_log_file_unsupported(capsys):
- lf = create_lf(None, "lf3")
+ def test_log_file_unsupported(caplog):
+ with caplog.at_level(logging.WARNING):
+ lf = create_lf(None, "lf3")

  assert lf is None
  assert (
- capsys.readouterr().out
- == "No progress bar as log file must have suffix lf1 or lf2. Simulation will continue as usual.\n"
+ caplog.text
+ == "WARNING root:lf.py:325 No progress bar as log file must have suffix lf1 or lf2. Simulation will continue as usual.\n"
  )


  @pytest.mark.usefixtures("log_timeout")
- def test_log_file_timeout(capsys):
- lf_filepath = MagicMock()
- lf_filepath.is_file.return_value = False
- lf = create_lf(lf_filepath, "lf1")
+ def test_log_file_timeout(caplog):
+ with caplog.at_level(logging.WARNING):
+ lf_filepath = MagicMock()
+ lf_filepath.is_file.return_value = False
+ lf = create_lf(lf_filepath, "lf1")

  assert lf is None
  assert (
- capsys.readouterr().out
- == "No progress bar as log file is expected but not detected. Simulation will continue as usual.\n"
+ caplog.text
+ == "WARNING root:lf.py:325 No progress bar as log file is expected but not detected. Simulation will continue as usual.\n"
  )


  @pytest.mark.usefixtures("log_timeout")
  @freeze_time("1970-01-01 00:00:00", tick=True)
- def test_log_file_from_old_run(capsys):
- lf_filepath = MagicMock()
- lf_filepath.is_file.return_value = True
- lf_filepath.stat.return_value.st_mtime = -10
- lf = create_lf(lf_filepath, "lf1")
+ def test_log_file_from_old_run(caplog):
+ with caplog.at_level(logging.WARNING):
+ lf_filepath = MagicMock()
+ lf_filepath.is_file.return_value = True
+ lf_filepath.stat.return_value.st_mtime = -10
+ lf = create_lf(lf_filepath, "lf1")

  assert lf is None
  assert (
- capsys.readouterr().out
- == "No progress bar as log file is from previous run. Simulation will continue as usual.\n"
+ caplog.text
+ == "WARNING root:lf.py:325 No progress bar as log file is from previous run. Simulation will continue as usual.\n"
  )

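Note: judging by the updated tests above, LF1 results are now retrieved with a keyword-driven to_dataframe() and a public filepath attribute, and the log can be obtained from an IEF via get_log(). A minimal usage sketch based only on the calls shown in this hunk (file names are taken from the test workspace):

    from pathlib import Path

    from floodmodeller_api import IEF, LF1

    # Read a 1D log directly, or fetch it from the corresponding IEF.
    lf1 = LF1(Path("ex3.lf1"))
    lf1_from_ief = IEF(Path("ex3.ief")).get_log()

    print(lf1.filepath)           # public attribute (previously the private _filepath)
    print(lf1.report_progress())  # 100 once the run has completed

    all_vars = lf1.to_dataframe(variable="all")  # every logged variable
    iters = lf1.to_dataframe(variable="iter")    # a single variable
    with_tuflow = lf1.to_dataframe(variable="all", include_tuflow=True)
    # include_tuflow adds tuflow_vol, tuflow_n_wet and tuflow_dt columns
    # (all NaN when the run has no TUFLOW link)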
floodmodeller_api/test/test_read_file.py

@@ -14,6 +14,7 @@ def test_read_file(test_workspace):
  ".ied",
  ".xml",
  ".zzn",
+ ".zzx",
  ".inp",
  ".lf1",
  ".lf2",
floodmodeller_api/test/test_river.py

@@ -54,8 +54,8 @@ river_unit_data_cases = [
  ]


- @pytest.mark.parametrize(("river_unit_data", "_"), river_unit_data_cases)
- def test_read_write(river_unit_data, _):
+ @pytest.mark.parametrize("river_unit_data", [x[0] for x in river_unit_data_cases])
+ def test_read_write(river_unit_data):
  river_section_1 = RIVER(river_unit_data)
  river_section_2 = RIVER(river_section_1._write())
  assert river_section_1 == river_section_2
@@ -120,7 +120,7 @@ def test_create_from_blank():
  assert len(blank_unit.data) == 0
  assert len(blank_unit.active_data) == 0
  assert blank_unit._write() == [
- "RIVER ",
+ "RIVER",
  "SECTION",
  "new_section ",
  " 0.000 0.000100 1000.000",
@@ -185,7 +185,7 @@ def test_set_river_dataframe_correct():
  ],
  )

- df = pd.DataFrame(
+ inputs = pd.DataFrame(
  {
  "X": [0.0, 1.0, 2.0],
  "Y": [5.0, 2.0, 5.0],
@@ -200,14 +200,14 @@ def test_set_river_dataframe_correct():
  },
  )

- unit.data = df.copy()
- pd.testing.assert_frame_equal(unit._data, df.copy())
+ unit.data = inputs.copy()
+ pd.testing.assert_frame_equal(unit._data, inputs.copy())


  def test_set_river_dataframe_incorrect():
  unit = RIVER()

- df = pd.DataFrame(
+ inputs = pd.DataFrame(
  {
  "X": [0.0, 1.0, 2.0],
  "Y": [5.0, 2.0, 5.0],
@@ -220,14 +220,14 @@ def test_set_river_dataframe_incorrect():
  },
  )

- with pytest.raises(ValueError):
- unit.data = df.copy()
+ with pytest.raises(ValueError, match="The DataFrame must only contain columns"):
+ unit.data = inputs.copy()


  def test_set_river_dataframe_case_sensitivity():
  unit = RIVER()

- df = pd.DataFrame(
+ inputs = pd.DataFrame(
  {
  "x": [0.0, 1.0, 2.0],
  "Y": [5.0, 2.0, 5.0],
@@ -242,5 +242,5 @@ def test_set_river_dataframe_case_sensitivity():
  },
  )

- unit.data = df.copy()
- pd.testing.assert_frame_equal(unit._data, df.copy())
+ unit.data = inputs.copy()
+ pd.testing.assert_frame_equal(unit._data, inputs.copy())
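Note: the RIVER tests above rename the setter input and pin the error message raised when a DataFrame with unexpected columns is assigned to .data. A minimal sketch of that validation, assuming the import path below (the full set of accepted columns is not visible in this hunk, so the invalid column name is purely illustrative):

    import pandas as pd
    import pytest

    from floodmodeller_api.units.sections import RIVER  # assumed import path

    unit = RIVER()
    bad = pd.DataFrame({"X": [0.0, 1.0], "Not A Column": [0, 0]})  # illustrative invalid column

    with pytest.raises(ValueError, match="The DataFrame must only contain columns"):
        unit.data = bad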
floodmodeller_api/test/test_tool.py

@@ -1,5 +1,8 @@
+ from __future__ import annotations
+
  import tkinter as tk
  from functools import wraps
+ from typing import ClassVar
  from unittest.mock import MagicMock, patch

  import pytest
@@ -18,7 +21,7 @@ class SumTool(FMTool):
  # Define the name
  name = "Sum tool"
  description = "A basic tool to add two numbers together"
- parameters = [
+ parameters: ClassVar[list[Parameter]] = [
  Parameter(
  name="a",
  dtype=float,
@@ -37,7 +40,7 @@ class SumTool(FMTool):
  tool_function = my_sum


- @pytest.fixture
+ @pytest.fixture()
  def tool():
  return SumTool()

@@ -49,7 +52,7 @@ def test_check_parameters():
  name = ""
  description = ""
  tool_function = print
- parameters = [
+ parameters: ClassVar[list[Parameter]] = [
  Parameter(
  name="param1",
  dtype=str,
@@ -64,7 +67,7 @@ def test_check_parameters():
  ),
  ]

- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match="Parameter names must be unique"):
  MyTool()


@@ -90,7 +93,7 @@ def test_run_from_command_line():
  class MyTool(FMTool):
  name = "My Tool"
  description = "My Tools Description"
- parameters = [Parameter("param1", str), Parameter("param2", str)]
+ parameters: ClassVar[list[Parameter]] = [Parameter("param1", str), Parameter("param2", str)]

  @classmethod
  def tool_function(cls, param1, param2):
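Note: tool definitions now annotate the class-level parameters list as a ClassVar. A condensed sketch of the SumTool pattern from the hunks above (only the Parameter fields visible in this diff are used, and the FMTool/Parameter import path is assumed):

    from __future__ import annotations

    from typing import ClassVar

    from floodmodeller_api.toolbox import FMTool, Parameter  # assumed import path


    def my_sum(a: float, b: float) -> float:
        return a + b


    class SumTool(FMTool):
        name = "Sum tool"
        description = "A basic tool to add two numbers together"
        parameters: ClassVar[list[Parameter]] = [
            Parameter(name="a", dtype=float),
            Parameter(name="b", dtype=float),
        ]
        tool_function = my_sum

Duplicate parameter names still raise a ValueError, now asserted against the message "Parameter names must be unique".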
floodmodeller_api/test/test_toolbox_structure_log.py

@@ -1,22 +1,28 @@
  import copy
  import csv
+ import json
+ import subprocess
  from pathlib import Path

  import pandas as pd
  import pytest

  from floodmodeller_api import DAT
- from floodmodeller_api.toolbox.model_build.structure_log import StructureLogBuilder
+ from floodmodeller_api.toolbox import StructureLog
+ from floodmodeller_api.toolbox.model_build.structure_log.structure_log import (
+ StructureLogBuilder,
+ serialise_keys,
+ )
  from floodmodeller_api.units.conduits import CONDUIT
  from floodmodeller_api.units.structures import ORIFICE


- @pytest.fixture
+ @pytest.fixture()
  def slb():
  return StructureLogBuilder("", "")


- @pytest.fixture
+ @pytest.fixture()
  def conduit_empty():
  c = CONDUIT()
  c.dist_to_next = 0
@@ -27,6 +33,7 @@ def conduit_empty():
  c.height = 0
  c.width = 0
  c.diameter = 0
+ c.elevation_invert = 0
  c.friction_on_invert = 0
  c.friction_on_soffit = 0
  c.friction_on_walls = 0
@@ -40,7 +47,7 @@ def conduit_empty():
  return c


- @pytest.fixture
+ @pytest.fixture()
  def conduit_filled():
  c = CONDUIT()
  c.dist_to_next = 0
@@ -51,6 +58,7 @@ def conduit_filled():
  c.height = 25.45
  c.width = 3
  c.diameter = 6
+ c.elevation_invert = 3
  c.friction_on_invert = 1.876
  c.friction_on_soffit = 1.34
  c.friction_on_walls = 1.8
@@ -64,171 +72,153 @@ def conduit_filled():
  return c


- @pytest.fixture
+ @pytest.fixture()
  def no_length():
  return 0


- @pytest.fixture
+ @pytest.fixture()
  def with_length():
  return 4.973


- @pytest.fixture
+ @pytest.fixture()
  def structure():
  return ORIFICE()


- def test_conduit_data(slb, conduit_empty):
- slb._dat = DAT()
- output = slb._conduit_data(conduit_empty)
- assert output == [0.0, "", ""]
-
-
- def test_culvert_loss_data(slb):
- output = slb._culvert_loss_data("", "")
- assert output == ""
- output = slb._culvert_loss_data("TEST1", "TEST2")
- assert output == "Ki: TEST1, Ko: TEST2"
-
-
- def test_circular_data(slb, conduit_empty, conduit_filled, no_length, with_length):
- slb._dat = DAT()
- output = slb._circular_data(conduit_empty, no_length)
- assert output == ["Mannings: 0", "dia: 0.00 x l: 0.00"]
- output = slb._circular_data(conduit_filled, with_length)
- assert output == [
- "Mannings: [min: 1.453345, max: 3.435]",
- "dia: 6.00 x l: 4.97",
- ]
-
-
- def test_sprungarch_data(slb, conduit_empty, conduit_filled, no_length, with_length):
- output = slb._sprungarch_data(conduit_empty, no_length)
- assert output == [
- "Mannings: 0",
- "(Springing: 0.00, Crown: 0.00) x w: 0.00 x l: 0.00",
- ]
- output = slb._sprungarch_data(conduit_filled, with_length)
- assert output == [
- "Mannings: [min: 1.34, max: 1.876]",
- "(Springing: 23.10, Crown: 5.40) x w: 3.00 x l: 4.97",
- ]
-
-
- def test_rectangular_data(slb, conduit_empty, conduit_filled, no_length, with_length):
- output = slb._rectangular_data(conduit_empty, no_length)
- assert output == ["Mannings: 0", "h: 0.00 x w: 0.00 x l: 0.00"]
- output = slb._rectangular_data(conduit_filled, with_length)
- assert output == [
- "Mannings: [min: 1.34, max: 1.876]",
- "h: 25.45 x w: 3.00 x l: 4.97",
- ]
-
-
- def test_section_data(slb, conduit_empty, conduit_filled, no_length, with_length):
- output = slb._section_data(conduit_empty, no_length)
- assert output == ["Colebrook-White: 0", "h: 0.00 x w: 0.00 x l: 0.00"]
- output = slb._section_data(conduit_filled, with_length)
- assert output == [
- "Colebrook-White: [min: 0.0, max: 4.0]",
- "h: 65.00 x w: 150.00 x l: 4.97",
- ]
-
-
- def test_sprung_data(slb, conduit_empty, conduit_filled, no_length, with_length):
- output = slb._sprung_data(conduit_empty, no_length)
- assert output == [
- "Mannings: 0",
- "(Springing: 0.00, Crown: 0.00) x w: 0.00 x l: 0.00",
- ]
- output = slb._sprung_data(conduit_filled, with_length)
- assert output == [
- "Mannings: [min: 1.34, max: 1.876]",
- "(Springing: 23.10, Crown: 5.40) x w: 3.00 x l: 4.97",
- ]
-
-
- def test_orifice_dimensions(slb, structure):
- structure.invert = 1
- output = slb._orifice_dimensions(structure)
- assert output == "h: -1.00 x w: -1.00"
-
-
- def test_spill_data(slb, structure):
- structure.data = pd.DataFrame(data={"X": [0, 0], "Y": [0, 0]})
- structure.weir_coefficient = 0
- output = slb._spill_data(structure)
- assert output == ["Elevation: 0.00 x w: 0.00", 0]
-
-
- def test_bridge_data(slb, structure):
- structure.section_data = pd.DataFrame(data={"X": [0, 0], "Y": [0, 0], "Mannings n": [0, 0]})
- structure.opening_data = pd.DataFrame(
- data={"Start": 0, "Finish": 0, "Springing Level": 0, "Soffit Level": 0},
- index=[0],
- )
- output = slb._bridge_data(structure)
- assert output == ["Mannings: 0", "h: 0.00 x w: 0.00"]
-
-
- def test_add_conduits(slb, conduit_filled, tmpdir):
- slb._dat = DAT()
- prev_c = copy.deepcopy(conduit_filled)
- prev_c.dist_to_next = 0
- prev_c.name = "prev"
- slb._dat.conduits["prev"] = prev_c
- conduit_filled.dist_to_next = 5
- slb._dat.conduits["test_conduit"] = conduit_filled
- next_c = copy.deepcopy(conduit_filled)
- next_c.dist_to_next = 0
- slb._dat.conduits["next"] = next_c
- slb._dat._all_units = [prev_c, conduit_filled, next_c]
- conduit_non_subtype = copy.deepcopy(conduit_filled)
- conduit_non_subtype._subtype = "NON_SUBTYPE"
- slb._dat.conduits["test_conduit_NON_SUBTYPE"] = conduit_non_subtype
-
- tmp_csv = Path(tmpdir) / "temp_structure_data.csv"
- with tmp_csv.open("w") as file:
+ @pytest.fixture()
+ def conduit_chain_dat(conduit_filled):
+ dat = DAT()
+ names = ["first", "second", "third", "fourth"]
+ for name in names:
+ cond = copy.deepcopy(conduit_filled)
+ cond.dist_to_next = 10
+ cond.name = name
+ dat.conduits[name] = cond
+ dat._all_units.append(cond)
+
+ cond = copy.deepcopy(conduit_filled)
+ cond.dist_to_next = 0
+ cond.name = "fifth"
+ dat.conduits["fifth"] = cond
+ dat._all_units.append(cond)
+ return dat
+
+
+ @pytest.fixture()
+ def ex18_dat_path(test_workspace):
+ return Path(test_workspace, "EX18.DAT")
+
+
+ @pytest.fixture()
+ def ex18_dat_expected():
+ # This is about the limit of what can be pasted in code, if any larger test material is found then read from csv.
+ return """Unit Name,Unit Type,Unit Subtype,Comment,Friction,Dimensions (m),Weir Coefficient,Culvert Inlet/Outlet Loss
+ C2,CONDUIT,CIRCULAR,,"Mannings: [min: 0.015, max: 0.020]",dia: 1.00 x l: 100.00 (Total conduit length: 500.00),,Ki: 0.6
+ C2_R1,REPLICATE,,,,,,
+ C2_R2,REPLICATE,,,,,,
+ C2_R3,REPLICATE,,,,,,
+ C2_R4,REPLICATE,,,,,,
+ C2m,CONDUIT,CIRCULAR,,"Mannings: [min: 0.015, max: 0.020]",dia: 1.00 x l: 0.00,,
+ C2md,CONDUIT,CIRCULAR,,"Mannings: [min: 0.015, max: 0.020]",dia: 1.00 x l: 100.00 (Total conduit length: 700.00),,
+ C2_R5,REPLICATE,,,,,,
+ C2_R6,REPLICATE,,,,,,
+ C2_R7,REPLICATE,,,,,,
+ C2_R8,REPLICATE,,,,,,
+ C2_R9,REPLICATE,,,,,,
+ C2_R10,REPLICATE,,,,,,
+ C2d,CONDUIT,CIRCULAR,,"Mannings: [min: 0.015, max: 0.020]",dia: 1.00 x l: 0.00,,
+ S0,WEIR,,,,Crest Elevation: 21.00 x w: 1.50,,
+ C2d,WEIR,,,,Crest Elevation: 18.00 x w: 0.60,,
+ S4,WEIR,,,,Crest Elevation: 17.90 x w: 2.00,,
+ S8,WEIR,,,,Crest Elevation: 17.70 x w: 2.00,,
+ S3LS,SPILL,,,,Elevation: 20.00 x w: 100.00,1.7,
+ """
+
+
+ def test_empty_conduit(slb, conduit_empty):
+ slb.dat = DAT()
+ output, _ = slb._conduit_data(conduit_empty)
+ assert output == {
+ "length": 0.0,
+ "total_length": 0.0,
+ }
+
+
+ def test_multi_conduits(slb, conduit_chain_dat, tmp_path):
+ expected = """Unit Name,Unit Type,Unit Subtype,Comment,Friction,Dimensions (m),Weir Coefficient,Culvert Inlet/Outlet Loss
+ first,CONDUIT,SECTION,,"Colebrook-White: [min: 0.000, max: 4.000]",h: 65.00 x w: 156.00 x l: 10.00 (Total conduit length: 40.00),,
+ second,CONDUIT,SECTION,,"Colebrook-White: [min: 0.000, max: 4.000]",h: 65.00 x w: 156.00 x l: 10.00,,
+ third,CONDUIT,SECTION,,"Colebrook-White: [min: 0.000, max: 4.000]",h: 65.00 x w: 156.00 x l: 10.00,,
+ fourth,CONDUIT,SECTION,,"Colebrook-White: [min: 0.000, max: 4.000]",h: 65.00 x w: 156.00 x l: 10.00,,
+ fifth,CONDUIT,SECTION,,"Colebrook-White: [min: 0.000, max: 4.000]",h: 65.00 x w: 156.00 x l: 0.00,,
+ """
+
+ slb.dat = conduit_chain_dat
+ tmp_csv = tmp_path / "test_multi_conduits.csv"
+ with tmp_csv.open("w", newline="") as file:
  slb._writer = csv.writer(file)
- slb._add_conduits()
-
-
- def test_add_structures(slb, structure, tmpdir):
- slb._dat = DAT()
- structure.soffit = 3
- structure.weir_coefficient = 1
- structure.data = pd.DataFrame(data={"X": [0, 0], "Y": [0, 0]})
- structure.section_data = pd.DataFrame(data={"X": [0, 0], "Y": [0, 0], "Mannings n": [0, 0]})
- structure.opening_data = pd.DataFrame(
- data={"Start": 0, "Finish": 0, "Springing Level": 0, "Soffit Level": 0},
- index=[0],
+ slb.add_conduits()
+ slb.write_csv_output(file)
+
+ with open(tmp_csv) as read_file:
+ text = read_file.read()
+
+ assert text == expected
+
+
+ @pytest.mark.parametrize(
+ ("filename"),
+ [
+ ("EX18"),
+ ("EX6"),
+ ("EX17"),
+ ],
+ )
+ def test_multiple_dats(filename, test_workspace, tmp_path):
+ dat_path = Path(test_workspace, f"{filename}.DAT")
+ expected_csv_path = Path(test_workspace / "structure_logs", f"{filename}_expected.csv")
+ expected_json_path = Path(test_workspace / "structure_logs", f"{filename}_expected.json")
+ test_csv_path = tmp_path / f"test_multiple_dats_{filename}.csv"
+ slb = StructureLogBuilder(dat_path, test_csv_path)
+ slb.dat = DAT(slb.dat_file_path)
+ slb.add_conduits()
+ slb.add_structures()
+
+ with expected_json_path.open("r") as file:
+ expected_json_data = json.load(file)
+
+ assert serialise_keys(slb.unit_store) == expected_json_data
+
+ with open(slb.csv_output_path, "w", newline="") as file:
+ slb.write_csv_output(file)
+
+ with expected_csv_path.open("r") as file:
+ expected_csv_data = file.read()
+ with test_csv_path.open("r") as file:
+ test_csv_data = file.read()
+
+ assert test_csv_data == expected_csv_data
+
+
+ def test_full_dat_from_python(slb, tmp_path, ex18_dat_path, ex18_dat_expected):
+ # these two tests should be as described in the toolbox documentation
+ tmp_csv = tmp_path / "test_full_dat_from_python.csv"
+ StructureLog.run(input_path=ex18_dat_path, output_path=tmp_csv)
+
+ with open(tmp_csv) as read_file:
+ text = read_file.read()
+ assert text == ex18_dat_expected
+
+
+ def test_full_dat_from_commandline(slb, tmp_path, ex18_dat_path, ex18_dat_expected):
+ # these two tests should be as described in the toolbox documentation
+ tmp_csv = tmp_path / "test_full_dat_from_python.csv"
+ subprocess.call(
+ ["fmapi-structure_log", "--input_path", str(ex18_dat_path), "--output_path", str(tmp_csv)],
  )
- structure.crest_elevation = 1
- structure.weir_breadth = 1
- structure.weir_length = 1
- structure.weir_elevation = 1
- slb._dat.structures["test_structure_orifice"] = structure
- struc_spill = copy.deepcopy(structure)
- struc_spill._unit = "SPILL"
- slb._dat.structures["test_structure_spill"] = struc_spill
- struc_sluice = copy.deepcopy(structure)
- struc_sluice._unit = "SLUICE"
- slb._dat.structures["test_structure_sluice"] = struc_sluice
- struc_rnweir = copy.deepcopy(structure)
- struc_rnweir._unit = "RNWEIR"
- slb._dat.structures["test_structure_rnweir"] = struc_rnweir
- struc_weir = copy.deepcopy(structure)
- struc_weir._unit = "WEIR"
- slb._dat.structures["test_structure_weir"] = struc_weir
- struc_bridge = copy.deepcopy(structure)
- struc_bridge._unit = "BRIDGE"
- slb._dat.structures["test_structure_bridge"] = struc_bridge
- struc_none = copy.deepcopy(structure)
- struc_none._unit = "NONE"
- slb._dat.structures["test_structure_none"] = struc_none
-
- tmp_csv = Path(tmpdir) / "temp_structure_data.csv"
- with tmp_csv.open("w") as file:
- slb._writer = csv.writer(file)
- slb._add_structures()
+
+ with open(tmp_csv) as read_file:
+ text = read_file.read()
+ assert text == ex18_dat_expected
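Note: the rewritten structure-log tests run the toolbox end to end both from Python and through the console entry point. A minimal sketch of the Python route, using only the call shown above (paths are placeholders):

    from pathlib import Path

    from floodmodeller_api.toolbox import StructureLog

    # Build a structure-log CSV summarising the conduits and structures in a DAT file.
    StructureLog.run(input_path=Path("EX18.DAT"), output_path=Path("structure_log.csv"))

The same run is available from the command line via the fmapi-structure_log entry point, e.g. fmapi-structure_log --input_path EX18.DAT --output_path structure_log.csv.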