floodmodeller-api 0.4.4__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. floodmodeller_api/__init__.py +1 -0
  2. floodmodeller_api/dat.py +117 -96
  3. floodmodeller_api/hydrology_plus/__init__.py +2 -0
  4. floodmodeller_api/hydrology_plus/helper.py +23 -0
  5. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +333 -0
  6. floodmodeller_api/ied.py +93 -90
  7. floodmodeller_api/ief.py +233 -50
  8. floodmodeller_api/ief_flags.py +1 -0
  9. floodmodeller_api/logs/lf.py +5 -1
  10. floodmodeller_api/mapping.py +2 -0
  11. floodmodeller_api/test/test_conveyance.py +23 -32
  12. floodmodeller_api/test/test_data/7082.ief +28 -0
  13. floodmodeller_api/test/test_data/BaseModel_2D_Q100.ief +28 -0
  14. floodmodeller_api/test/test_data/Baseline_unchecked.csv +77 -0
  15. floodmodeller_api/test/test_data/Constant QT.ief +19 -0
  16. floodmodeller_api/test/test_data/Domain1_Q_xml_expected.json +7 -7
  17. floodmodeller_api/test/test_data/EX18_DAT_expected.json +54 -38
  18. floodmodeller_api/test/test_data/EX3_DAT_expected.json +246 -166
  19. floodmodeller_api/test/test_data/EX3_IEF_expected.json +25 -20
  20. floodmodeller_api/test/test_data/EX6_DAT_expected.json +522 -350
  21. floodmodeller_api/test/test_data/FEH boundary.ief +23 -0
  22. floodmodeller_api/test/test_data/Linked1D2D_xml_expected.json +7 -7
  23. floodmodeller_api/test/test_data/P3Panels_UNsteady.ief +25 -0
  24. floodmodeller_api/test/test_data/QT in dat file.ief +20 -0
  25. floodmodeller_api/test/test_data/T10.ief +25 -0
  26. floodmodeller_api/test/test_data/T2.ief +25 -0
  27. floodmodeller_api/test/test_data/T5.ief +25 -0
  28. floodmodeller_api/test/test_data/df_flows_hplus.csv +56 -0
  29. floodmodeller_api/test/test_data/event_hplus.csv +56 -0
  30. floodmodeller_api/test/test_data/ex4.ief +20 -0
  31. floodmodeller_api/test/test_data/ex6.ief +21 -0
  32. floodmodeller_api/test/test_data/example_h+_export.csv +77 -0
  33. floodmodeller_api/test/test_data/hplus_export_example_1.csv +72 -0
  34. floodmodeller_api/test/test_data/hplus_export_example_10.csv +77 -0
  35. floodmodeller_api/test/test_data/hplus_export_example_2.csv +79 -0
  36. floodmodeller_api/test/test_data/hplus_export_example_3.csv +77 -0
  37. floodmodeller_api/test/test_data/hplus_export_example_4.csv +131 -0
  38. floodmodeller_api/test/test_data/hplus_export_example_5.csv +77 -0
  39. floodmodeller_api/test/test_data/hplus_export_example_6.csv +131 -0
  40. floodmodeller_api/test/test_data/hplus_export_example_7.csv +131 -0
  41. floodmodeller_api/test/test_data/hplus_export_example_8.csv +131 -0
  42. floodmodeller_api/test/test_data/hplus_export_example_9.csv +131 -0
  43. floodmodeller_api/test/test_data/network_dat_expected.json +312 -210
  44. floodmodeller_api/test/test_data/network_ied_expected.json +6 -6
  45. floodmodeller_api/test/test_data/network_with_comments.ied +55 -0
  46. floodmodeller_api/test/test_flowtimeprofile.py +133 -0
  47. floodmodeller_api/test/test_hydrology_plus_export.py +210 -0
  48. floodmodeller_api/test/test_ied.py +12 -0
  49. floodmodeller_api/test/test_ief.py +49 -9
  50. floodmodeller_api/test/test_json.py +6 -1
  51. floodmodeller_api/test/test_read_file.py +27 -0
  52. floodmodeller_api/test/test_river.py +169 -0
  53. floodmodeller_api/to_from_json.py +7 -1
  54. floodmodeller_api/tool.py +6 -10
  55. floodmodeller_api/units/__init__.py +11 -1
  56. floodmodeller_api/units/boundaries.py +6 -0
  57. floodmodeller_api/units/conveyance.py +101 -212
  58. floodmodeller_api/units/sections.py +120 -39
  59. floodmodeller_api/util.py +2 -0
  60. floodmodeller_api/version.py +1 -1
  61. floodmodeller_api/xml2d.py +20 -13
  62. floodmodeller_api/xsd_backup.xml +738 -0
  63. {floodmodeller_api-0.4.4.dist-info → floodmodeller_api-0.5.0.dist-info}/METADATA +2 -1
  64. {floodmodeller_api-0.4.4.dist-info → floodmodeller_api-0.5.0.dist-info}/RECORD +68 -34
  65. {floodmodeller_api-0.4.4.dist-info → floodmodeller_api-0.5.0.dist-info}/WHEEL +1 -1
  66. {floodmodeller_api-0.4.4.dist-info → floodmodeller_api-0.5.0.dist-info}/LICENSE.txt +0 -0
  67. {floodmodeller_api-0.4.4.dist-info → floodmodeller_api-0.5.0.dist-info}/entry_points.txt +0 -0
  68. {floodmodeller_api-0.4.4.dist-info → floodmodeller_api-0.5.0.dist-info}/top_level.txt +0 -0

floodmodeller_api/test/test_data/network_ied_expected.json
@@ -1,16 +1,16 @@
  {
  "API Class": "floodmodeller_api.ied.IED",
- "API Version": "0.4.2.post1",
+ "API Version": "0.4.4.post1",
  "Object Attributes": {
- "_filepath": "C:\\Users\\PIERCEJA\\OneDrive - Jacobs\\Documents\\Projects\\Flood Modeller API\\development\\floodmodeller-api\\floodmodeller_api\\test\\test_data\\network.ied",
+ "_filepath": "c:\\Users\\PIERCEJA\\OneDrive - Jacobs\\Documents\\Projects\\Flood Modeller API\\development\\floodmodeller-api\\floodmodeller_api\\test\\test_data\\network.ied",
  "file": {
  "API Class": "floodmodeller_api.backup.File",
  "Object Attributes": {
- "path": "C:\\Users\\PIERCEJA\\OneDrive - Jacobs\\Documents\\Projects\\Flood Modeller API\\development\\floodmodeller-api\\floodmodeller_api\\test\\test_data\\network.ied",
+ "path": "c:\\Users\\PIERCEJA\\OneDrive - Jacobs\\Documents\\Projects\\Flood Modeller API\\development\\floodmodeller-api\\floodmodeller_api\\test\\test_data\\network.ied",
  "ext": ".ied",
- "dttm_str": "2024-04-17-14-09-24",
- "file_id": "88256e1c368ee0953d6b32dba41eeafa01bcb245",
- "backup_filename": "88256e1c368ee0953d6b32dba41eeafa01bcb245_2024-04-17-14-09-24.ied",
+ "dttm_str": "2024-09-10-16-49-49",
+ "file_id": "b54eee4697704dd5fd29f9faa79728e67f517e26",
+ "backup_filename": "b54eee4697704dd5fd29f9faa79728e67f517e26_2024-09-10-16-49-49.ied",
  "temp_dir": "C:\\Users\\PIERCEJA\\AppData\\Local\\Temp",
  "backup_dirname": "floodmodeller_api_backup",
  "backup_dir": "C:\\Users\\PIERCEJA\\AppData\\Local\\Temp\\floodmodeller_api_backup",

floodmodeller_api/test/test_data/network_with_comments.ied
@@ -0,0 +1,55 @@
+ COMMENT
+ 5
+ Comment with
+ five
+ lines
+ at the
+ very start
+ FSSR16BDY
+ resin
+ 66.000
+ 0.0 1.000 SCALEFACT 1.000HYDROGRAPH 1.00 FULL
+ ENGLAND
+ 99.000 6.500 11.300 0.400 0.050
+ 0.000 13.000 30.900 0.000
+ 761.000 55.000 0.370 167.400
+ FSRER
+ 100.000 0.000 0.960
+ FSRCW
+ 0.000
+ F16PR FIXED
+ 30.000
+ OBSTP
+ 1.000 4.500
+ F16BF
+ 0.000
+ OBSUH
+ 7 mmarea
+ 0.000
+ 0.100
+ 0.300
+ 0.500
+ 0.300
+ 0.100
+ 0.000
+ WINRP 0
+ 0
+ COMMENT
+ 1
+ Adding a comment part way through to test
+ QTBDY
+ CS26
+ 4 0.000 0.000 seconds NOEXTEND LINEAR 0.000 0.000 OVERRIDE
+ 1.000 0.000
+ 1.000 46800.000
+ 0.000 47160.000
+ 0.000 1.000e+09
+ QHBDY
+ DS4
+ 6 0.000 LINEAR
+ 0.000 67.030
+ 1.751 67.750
+ 5.303 68.220
+ 10.351 68.560
+ 33.975 69.059
+ 65.165 69.487

floodmodeller_api/test/test_flowtimeprofile.py
@@ -0,0 +1,133 @@
+ from pathlib import Path
+
+ import pandas as pd
+ import pytest
+
+ from floodmodeller_api.ief import FlowTimeProfile
+
+
+ @pytest.fixture
+ def valid_raw_strings():
+     return [
+         ',,4,"fmfile.csv",fm1,T=100,"SingleEvent,AllNodes"',
+         'label1,5,4,"fmfile.csv",fm1,T=100,"SingleEvent,SingleNode"',
+         'label1 label2,5 6,4,"fmfile.csv",fm2,T=100,MultiEvent&Nodes',
+         'label1,9,2,"ReFH2.csv",refh2,T=100',
+         'label1,5,4,"fsu.csv",fsu,T=100,"Total flow m3/s (1 year)- urbanised model"',
+         r'Welches_Dam,12,23,"C:\Users\ka007155\Downloads\Baseline unchecked.csv",hplus,1 - 11 - 2020 Upper',
+     ]
+
+
+ @pytest.mark.parametrize(
+     "raw_string, expected",
+     [
+         (
+             ',,4,"fmfile.csv",fm1,T=100,"SingleEvent,AllNodes"',
+             {
+                 "labels": [],
+                 "columns": [],
+                 "start_row": 4,
+                 "csv_filepath": "fmfile.csv",
+                 "file_type": "fm1",
+                 "profile": "T=100",
+                 "comment": '"SingleEvent,AllNodes"',
+             },
+         ),
+         (
+             'label1,5,4,fmfile.csv,fm1,T=100,"SingleEvent,SingleNode"',
+             {
+                 "labels": ["label1"],
+                 "columns": [5],
+                 "start_row": 4,
+                 "csv_filepath": "fmfile.csv",
+                 "file_type": "fm1",
+                 "profile": "T=100",
+                 "comment": '"SingleEvent,SingleNode"',
+             },
+         ),
+     ],
+ )
+ def test_parse_raw_string(raw_string, expected):
+     flow_profile = FlowTimeProfile(raw_string=raw_string)
+
+     assert flow_profile.labels == expected["labels"]
+     assert flow_profile.columns == expected["columns"]
+     assert flow_profile.start_row == expected["start_row"]
+     assert flow_profile.csv_filepath == expected["csv_filepath"]
+     assert flow_profile.file_type == expected["file_type"]
+     assert flow_profile.profile == expected["profile"]
+     assert flow_profile.comment == expected["comment"]
+
+
+ def test_init_with_kwargs():
+     kwargs = {
+         "labels": ["label1", "label2"],
+         "columns": [5, 6],
+         "start_row": 4,
+         "csv_filepath": '"fmfile.csv"',
+         "file_type": "fm2",
+         "profile": "T=100",
+         "comment": "MultiEvent&Nodes",
+         "ief_filepath": "../projects",
+     }
+     flow_profile = FlowTimeProfile(**kwargs)
+
+     assert flow_profile.labels == ["label1", "label2"]
+     assert flow_profile.columns == [5, 6]
+     assert flow_profile.start_row == 4
+     assert flow_profile.csv_filepath == '"fmfile.csv"'
+     assert flow_profile.file_type == "fm2"
+     assert flow_profile.profile == "T=100"
+     assert flow_profile.comment == "MultiEvent&Nodes"
+     assert flow_profile._csvfile == Path("../projects/fmfile.csv").resolve()
+
+
+ @pytest.mark.parametrize(
+     "raw_string",
+     [
+         "label1 label2,5 6,4,fmfile.csv,fm2,T=100,MultiEvent&Nodes",
+         ',,4,fmfile.csv,fm1,T=100,"SingleEvent,AllNodes"',
+     ],
+ )
+ def test_str_representation(raw_string):
+     flow_profile = FlowTimeProfile(raw_string=raw_string)
+
+     assert str(flow_profile) == raw_string
+
+
+ def test_count_series_with_fm1(mocker):
+     mocker.patch("pandas.read_csv", return_value=pd.DataFrame(columns=["A", "B", "C"]))
+
+     kwargs = {
+         "labels": ["label1"],
+         "columns": [5],
+         "start_row": 4,
+         "csv_filepath": '"fmfile.csv"',
+         "file_type": "fm1",
+         "profile": "T=100",
+         "comment": "SingleEvent,AllNodes",
+         "ief_filepath": "../projects",
+     }
+
+     flow_profile = FlowTimeProfile(**kwargs)
+
+     series_count = flow_profile.count_series()
+     assert series_count == 3  # Since the mock CSV has 3 columns
+
+
+ def test_count_series_without_fm1():
+     kwargs = {
+         "labels": ["label1"],
+         "columns": [5, 6],
+         "start_row": 4,
+         "csv_filepath": '"fmfile.csv"',
+         "file_type": "fm2",
+         "profile": "T=100",
+         "comment": "MultiEvent&Nodes",
+         "ief_filepath": "../projects",
+     }
+
+     flow_profile = FlowTimeProfile(**kwargs)
+
+     series_count = flow_profile.count_series()
+     assert series_count == 2  # Number of columns passed
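
Note: the new test module above exercises the FlowTimeProfile class added to floodmodeller_api.ief in 0.5.0. The sketch below is assembled only from behaviour these tests assert (raw-string field order, attribute names, count_series); the file names are placeholders and anything beyond what the tests show should be treated as an assumption rather than documented API.

    from floodmodeller_api.ief import FlowTimeProfile

    # Parse the raw comma-separated form used in an IEF [Flow Time Profiles] entry:
    # labels, columns, start_row, csv_filepath, file_type, profile, comment
    ftp = FlowTimeProfile(raw_string='label1,5,4,fmfile.csv,fm1,T=100,"SingleEvent,SingleNode"')
    print(ftp.labels, ftp.columns, ftp.start_row)  # ['label1'] [5] 4

    # Or build one from keyword arguments; ief_filepath anchors a relative csv path
    ftp2 = FlowTimeProfile(
        labels=["label1", "label2"],
        columns=[5, 6],
        start_row=4,
        csv_filepath="fmfile.csv",
        file_type="fm2",
        profile="T=100",
        comment="MultiEvent&Nodes",
        ief_filepath="../projects",
    )
    print(ftp2.count_series())  # 2 -- one series per listed column for non-fm1 types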

floodmodeller_api/test/test_hydrology_plus_export.py
@@ -0,0 +1,210 @@
+ """Tests to check the class HydrographPlus"""
+
+ import pandas as pd
+ import pytest
+
+ from floodmodeller_api.hydrology_plus.hydrology_plus_export import HydrologyPlusExport
+ from floodmodeller_api.ief import IEF, FlowTimeProfile
+ from floodmodeller_api.units import QTBDY
+ from floodmodeller_api.util import FloodModellerAPIError
+
+
+ @pytest.fixture()
+ def expected_metadata():
+     """Extracts the metadata from the csv to be compared with the class"""
+
+     return {
+         "Hydrograph Name": "Baseline unchecked",
+         "Hydrograph Description": "",
+         "Calculation Point": "CP_003",
+         "ReFH2 Name": "CP_003_ReFH2_1",
+         "Winfap Name": "CP_003_WINFAP_1",
+         "Urban/Rural": "Urban",
+         "Urban/Rural Comment": "",
+         "ReFH2 Comment": "",
+         "Winfap Comment": "",
+         "Winfap Distribution": "GEV",
+         "Winfap Distribution Comment": "",
+         "Use Climate Change Allowances": "True",
+         "Use Custom Scale Factors": "False",
+         "Created By": "KA007155",
+         "Created Date": "30/04/2024 09:42:23",
+         "Checksum": "ef77d9bd-2eb3-4689-a1e3-665d293db810",
+     }
+
+
+ @pytest.fixture()
+ def expected_dataframe(test_workspace):
+     """Extracts the df with all the flows to be compared with the df of the class"""
+
+     return pd.read_csv(test_workspace / "df_flows_hplus.csv", index_col=0)
+
+
+ @pytest.fixture()
+ def expected_event(test_workspace):
+     """Extracts the event from the original csv to be compared with the output of the class"""
+     event_file = pd.read_csv(test_workspace / "event_hplus.csv", index_col=0)
+
+     return event_file.squeeze()
+
+
+ @pytest.fixture()
+ def hydrology_plus_export_object(test_workspace):
+     """Creates the object to make the comparison with the csv data"""
+
+     return HydrologyPlusExport(test_workspace / "Baseline_unchecked.csv")
+
+
+ def test_data_metadata(
+     expected_metadata: dict[str, str],
+     hydrology_plus_export_object: HydrologyPlusExport,
+ ):
+     """Compares the metadata between the csv and the class"""
+     assert expected_metadata == hydrology_plus_export_object.metadata
+
+
+ def test_data_flows_df(
+     expected_dataframe: pd.DataFrame,
+     hydrology_plus_export_object: HydrologyPlusExport,
+ ):
+     """Compares the df with all the flows between the csv and the class"""
+     pd.testing.assert_frame_equal(expected_dataframe, hydrology_plus_export_object.data)
+
+
+ def test_data_event(expected_event: pd.Series, hydrology_plus_export_object: HydrologyPlusExport):
+     """Compares the event between the csv and the class"""
+     pd.testing.assert_series_equal(
+         expected_event,
+         hydrology_plus_export_object.get_event_flow("2020 Upper - 11 - 1"),
+     )
+
+
+ def test_data_event_from_params(
+     expected_event: pd.Series,
+     hydrology_plus_export_object: HydrologyPlusExport,
+ ):
+     """Compares the event between the csv and the class"""
+     pd.testing.assert_series_equal(
+         expected_event,
+         hydrology_plus_export_object.get_event_flow(
+             scenario="2020 Upper",
+             storm_duration=11.0,
+             return_period=1.0,
+         ),
+     )
+
+
+ def test_get_unique_event_components(hydrology_plus_export_object: HydrologyPlusExport):
+     """Test that unique event components are correctly extracted."""
+     # Expected unique components
+     expected_return_periods = sorted({1, 2, 5, 10, 30, 50, 75, 100, 200, 1000})
+     expected_storm_durations = sorted({11})
+     expected_scenarios = sorted({"2020 Upper", "Reconciled Baseline"})
+
+     # Trigger the _get_unique_event_components method
+     hydrology_plus_export_object._get_unique_event_components()
+
+     # Assertions
+     assert hydrology_plus_export_object.return_periods == expected_return_periods
+     assert hydrology_plus_export_object.storm_durations == expected_storm_durations
+     assert hydrology_plus_export_object.scenarios == expected_scenarios
+
+
+ def test_invalid_header_in_file(tmp_path):
+     """Test that _read raises a ValueError if the file header is invalid."""
+     # Create a temporary file with an invalid header
+     invalid_header_file = tmp_path / "invalid_header.csv"
+     invalid_header_content = "Invalid Header\nSome other content\n"
+     invalid_header_file.write_text(invalid_header_content)
+
+     with pytest.raises(FloodModellerAPIError):
+         HydrologyPlusExport(invalid_header_file)
+
+
+ def test_gereate_ief_files(test_workspace, hydrology_plus_export_object: HydrologyPlusExport):
+     iefs = hydrology_plus_export_object.generate_iefs(node_label="pytest")
+     assert len(iefs) == len(hydrology_plus_export_object.data.columns)
+     generated_files = list(test_workspace.glob("*_generated.ief"))
+     assert len(generated_files) == len(hydrology_plus_export_object.data.columns)
+     for file in generated_files:
+         file.unlink()
+
+
+ def test_generate_ief(
+     test_workspace,
+     hydrology_plus_export_object: HydrologyPlusExport,
+ ):
+     """Test generating a single IEF file."""
+     node_label = "test_node"
+     event = "2020 Upper - 11 - 1"
+
+     # Generate a single IEF file
+     generated_ief = hydrology_plus_export_object.generate_ief(
+         node_label=node_label,
+         event=event,
+     )
+
+     # Assert the IEF file was created and matches expectations
+     assert isinstance(generated_ief, IEF)
+     assert len(generated_ief.flowtimeprofiles) == 1
+     output_file = test_workspace / "2020Upper-11-1_generated.ief"
+     assert output_file.exists()
+     assert generated_ief._filepath == output_file
+
+     # Cleanup
+     output_file.unlink()
+
+
+ def test_get_qtbdy(hydrology_plus_export_object: HydrologyPlusExport, expected_event: pd.Series):
+     """Test generating a QTBDY object."""
+     qtbdy_name = "test_qtbdy"
+     event = "2020 Upper - 11 - 1"
+
+     # Generate a QTBDY object
+     qtbdy = hydrology_plus_export_object.get_qtbdy(
+         qtbdy_name=qtbdy_name,
+         event=event,
+     )
+
+     # Assert the QTBDY object is created and contains expected data
+     assert isinstance(qtbdy, QTBDY)
+     pd.testing.assert_series_equal(qtbdy.data, expected_event)
+     assert qtbdy.name == qtbdy_name
+
+     # assert QTBDY is valid
+     qtbdy._write()
+
+
+ def test_get_flowtimeprofile(hydrology_plus_export_object: HydrologyPlusExport):
+     """Test generating a FlowTimeProfile object."""
+     node_label = "test_node"
+     event = "2020 Upper - 11 - 1"
+
+     # Generate a FlowTimeProfile object
+     ftp = hydrology_plus_export_object.get_flowtimeprofile(node_label=node_label, event=event)
+
+     # Assert the FlowTimeProfile object is created and contains expected attributes
+     assert isinstance(ftp, FlowTimeProfile)
+     assert ftp.labels == [node_label]
+     assert ftp.csv_filepath == hydrology_plus_export_object._filepath.name
+     assert ftp.profile == f"{event} - Flow (m3/s)"
+
+
+ @pytest.mark.parametrize(
+     "export_csv",
+     [
+         "hplus_export_example_1.csv",
+         "hplus_export_example_2.csv",
+         "hplus_export_example_3.csv",
+         "hplus_export_example_4.csv",
+         "hplus_export_example_5.csv",
+         "hplus_export_example_6.csv",
+         "hplus_export_example_7.csv",
+         "hplus_export_example_8.csv",
+         "hplus_export_example_9.csv",
+         "hplus_export_example_10.csv",
+     ],
+ )
+ def test_load_hydrology_plus_export_doesnt_fail(test_workspace, export_csv):
+     """Ensure loading a hydrology+ export file succeeds without error"""
+     HydrologyPlusExport(test_workspace / export_csv)
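
Note: test_hydrology_plus_export.py drives the new HydrologyPlusExport class (floodmodeller_api/hydrology_plus/hydrology_plus_export.py, +333 lines in this release). A rough usage sketch based only on the calls made above; the CSV path is a placeholder and the event label follows the "scenario - storm duration - return period" pattern used in the tests.

    from pathlib import Path

    from floodmodeller_api.hydrology_plus.hydrology_plus_export import HydrologyPlusExport

    export = HydrologyPlusExport(Path("Baseline_unchecked.csv"))  # a Hydrology+ export CSV

    print(export.metadata["Calculation Point"])  # metadata parsed from the export header
    print(export.data.columns)                   # one flow series per exported event

    # Pull a single event, either by its full label or by its components
    flow = export.get_event_flow("2020 Upper - 11 - 1")
    flow = export.get_event_flow(scenario="2020 Upper", storm_duration=11.0, return_period=1.0)

    # Turn an event into model inputs
    qtbdy = export.get_qtbdy(qtbdy_name="CP_003", event="2020 Upper - 11 - 1")
    ftp = export.get_flowtimeprofile(node_label="CP_003", event="2020 Upper - 11 - 1")

    # Or generate IEF files directly (the tests show "*_generated.ief" files being
    # written alongside the export CSV)
    ief = export.generate_ief(node_label="CP_003", event="2020 Upper - 11 - 1")
    iefs = export.generate_iefs(node_label="CP_003")  # one per exported event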

floodmodeller_api/test/test_ied.py
@@ -10,6 +10,11 @@ def ied_fp(test_workspace):
      return Path(test_workspace, "network.ied")


+ @pytest.fixture
+ def ied_fp_comments(test_workspace):
+     return Path(test_workspace, "network_with_comments.ied")
+
+
  def test_open_ied_does_not_change_file(ied_fp):
      """IED: Test str representation equal to ied file with no changes"""
      with open(ied_fp) as ied_file:
@@ -27,3 +32,10 @@ def test_open_ied_does_not_change_file(ied_fp):
          " 0.000 1.000e+09",
      ]
      assert ied.boundaries["CS26"]._write() == cs26_expected
+
+
+ def test_ied_with_comments(ied_fp_comments):
+     with open(ied_fp_comments) as ied_file:
+         data_before = ied_file.read()
+     ied = IED(ied_fp_comments)
+     assert ied._write() == data_before

floodmodeller_api/test/test_ief.py
@@ -4,6 +4,7 @@ from unittest.mock import call, patch
  import pytest

  from floodmodeller_api import IEF
+ from floodmodeller_api.ief import FlowTimeProfile
  from floodmodeller_api.util import FloodModellerAPIError


@@ -17,12 +18,6 @@ def ief(ief_fp: Path) -> IEF:
      return IEF(ief_fp)


- @pytest.fixture()
- def data_before(ief_fp: Path) -> str:
-     with open(ief_fp) as ief_file:
-         return ief_file.read()
-
-
  @pytest.fixture()
  def exe_bin(tmpdir) -> Path:
      for exe in ["ISISf32.exe", "ISISf32_DoubleP.exe"]:
@@ -43,9 +38,54 @@
      yield sleep


- def test_ief_open_does_not_change_data(ief: IEF, data_before: str):
-     """IEF: Test str representation equal to ief file with no changes"""
-     assert ief._write() == data_before
+ def test_ief_read_doesnt_change_data(test_workspace, tmpdir):
+     """IEF: Check all '.ief' files in folder by reading the _write() output into a new IEF instance and checking it stays the same."""
+     for ief_file in Path(test_workspace).glob("*.ief"):
+         ief = IEF(ief_file)
+         first_output = ief._write()
+         new_path = Path(tmpdir) / "tmp.ief"
+         ief.save(new_path)
+         second_ief = IEF(new_path)
+         assert ief == second_ief  # Checks equivalence on the class itself
+         second_output = second_ief._write()
+         assert first_output == second_output
+
+
+ def test_update_property(ief):
+     """Check if updating a property is correctly reflected in _write"""
+     ief.title = "updated_property"
+     assert "Title=updated_property" in ief._write()
+
+
+ def test_delete_property(ief):
+     del ief.slot
+     assert "Slot=1" not in ief._write()
+
+
+ def test_add_new_group_property(ief):
+     ief.FlowScaling1 = "test"
+     assert "FlowScaling1=test" in ief._write()
+     assert "[Boundary Adjustments]" in ief._write()
+
+
+ def test_add_flowtimeprofile(ief):
+     prev_output = ief._write()
+     ief.flowtimeprofiles = [FlowTimeProfile("lbl,2,4,../../path.csv,hplus,scoobydoo,where-are-you")]
+     output = ief._write()
+     assert prev_output in output
+     assert ief.noofflowtimeprofiles == 1
+     assert ief.noofflowtimeseries == 1
+     assert "[Flow Time Profiles]" in output
+     assert "FlowTimeProfile0=lbl,2,4,../../path.csv,hplus,scoobydoo,where-are-you" in output
+
+
+ def test_delete_all_flowtimeprofiles(test_workspace):
+     ief = IEF(test_workspace / "7082.ief")
+     ief.flowtimeprofiles = []
+     output = ief._write()
+     assert "[Flow Time Profiles]" not in output
+     assert not hasattr(ief, "noofflowtimeprofiles")
+     assert not hasattr(ief, "noofflowtimeseries")


  @pytest.mark.usefixtures("log_timeout")
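
Note: the added IEF tests show the 0.5.0 handling of flow time profiles: assigning ief.flowtimeprofiles writes a [Flow Time Profiles] group with FlowTimeProfileN= entries and the NoOfFlowTimeProfiles/NoOfFlowTimeSeries counters, and clearing the list removes the group. A small sketch mirroring those tests; the paths are placeholders and _write() is the internal serialiser the tests call.

    from pathlib import Path

    from floodmodeller_api import IEF
    from floodmodeller_api.ief import FlowTimeProfile

    ief = IEF(Path("model.ief"))  # placeholder path to an existing IEF
    ief.flowtimeprofiles = [
        FlowTimeProfile("lbl,2,4,../../path.csv,hplus,scoobydoo,where-are-you"),
    ]
    output = ief._write()
    assert "[Flow Time Profiles]" in output
    assert ief.noofflowtimeprofiles == 1  # counters kept in step with the list

    ief.flowtimeprofiles = []  # emptying the list drops the group again
    assert "[Flow Time Profiles]" not in ief._write()
    ief.save(Path("model_updated.ief"))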

floodmodeller_api/test/test_json.py
@@ -71,7 +71,8 @@ def test_to_json_matches_expected(parameterised_objs_and_expected):
      json_dict_from_obj = json.loads(obj.to_json())["Object Attributes"]

      # Second, to handle the json file ..._expected.json which must be the same as the object created above.
-     json_dict_from_file = json.load(open(json_expected))["Object Attributes"]  # noqa: SIM115
+     with open(json_expected) as file:
+         json_dict_from_file = json.load(file)["Object Attributes"]

      # keys to ignore when testing for equivalence
      keys_to_remove = ["_filepath", "file", "_log_path"]
@@ -112,3 +113,7 @@ def test_is_jsonable_with_non_jsonable_object():
          pass

      assert not is_jsonable(NonJsonable())
+
+
+ if __name__ == "__main__":
+     create_expected_json_files()

floodmodeller_api/test/test_read_file.py
@@ -0,0 +1,27 @@
+ import pytest
+
+ from floodmodeller_api import read_file
+ from floodmodeller_api.util import FloodModellerAPIError
+
+
+ def test_read_file(test_workspace):
+     for file in test_workspace.glob("*"):
+         if (
+             file.suffix.lower()
+             in [
+                 ".ief",
+                 ".dat",
+                 ".ied",
+                 ".xml",
+                 ".zzn",
+                 ".inp",
+                 ".lf1",
+                 ".lf2",
+             ]
+             or file.name in ["example_h+_export.csv", "Baseline_unchecked.csv"]
+             or file.name.startswith("hplus_export_example")
+         ):
+             read_file(file)
+         else:
+             with pytest.raises((ValueError, FloodModellerAPIError)):
+                 read_file(file)
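
Note: the read_file test above accepts Hydrology+ export CSVs (Baseline_unchecked.csv, example_h+_export.csv, hplus_export_example_*.csv) alongside the existing .ief/.dat/.ied/.xml/.zzn/.inp/.lf1/.lf2 formats, and expects any other file to raise. A minimal sketch of that entry point, assuming read_file returns the matching API class for each supported file; the return types and paths are inferred, not shown in the diff.

    from floodmodeller_api import read_file

    ief = read_file("model.ief")                  # placeholder paths
    export = read_file("Baseline_unchecked.csv")  # new in 0.5.0: Hydrology+ export CSVs load too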