floodmodeller-api 0.4.4.post1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67) hide show
  1. floodmodeller_api/__init__.py +1 -0
  2. floodmodeller_api/dat.py +117 -96
  3. floodmodeller_api/hydrology_plus/__init__.py +2 -0
  4. floodmodeller_api/hydrology_plus/helper.py +23 -0
  5. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +333 -0
  6. floodmodeller_api/ied.py +93 -90
  7. floodmodeller_api/ief.py +233 -50
  8. floodmodeller_api/ief_flags.py +1 -0
  9. floodmodeller_api/logs/lf.py +5 -1
  10. floodmodeller_api/mapping.py +2 -0
  11. floodmodeller_api/test/test_conveyance.py +23 -32
  12. floodmodeller_api/test/test_data/7082.ief +28 -0
  13. floodmodeller_api/test/test_data/BaseModel_2D_Q100.ief +28 -0
  14. floodmodeller_api/test/test_data/Baseline_unchecked.csv +77 -0
  15. floodmodeller_api/test/test_data/Constant QT.ief +19 -0
  16. floodmodeller_api/test/test_data/Domain1_Q_xml_expected.json +7 -7
  17. floodmodeller_api/test/test_data/EX18_DAT_expected.json +54 -38
  18. floodmodeller_api/test/test_data/EX3_DAT_expected.json +246 -166
  19. floodmodeller_api/test/test_data/EX3_IEF_expected.json +25 -20
  20. floodmodeller_api/test/test_data/EX6_DAT_expected.json +522 -350
  21. floodmodeller_api/test/test_data/FEH boundary.ief +23 -0
  22. floodmodeller_api/test/test_data/Linked1D2D_xml_expected.json +7 -7
  23. floodmodeller_api/test/test_data/P3Panels_UNsteady.ief +25 -0
  24. floodmodeller_api/test/test_data/QT in dat file.ief +20 -0
  25. floodmodeller_api/test/test_data/T10.ief +25 -0
  26. floodmodeller_api/test/test_data/T2.ief +25 -0
  27. floodmodeller_api/test/test_data/T5.ief +25 -0
  28. floodmodeller_api/test/test_data/df_flows_hplus.csv +56 -0
  29. floodmodeller_api/test/test_data/event_hplus.csv +56 -0
  30. floodmodeller_api/test/test_data/ex4.ief +20 -0
  31. floodmodeller_api/test/test_data/ex6.ief +21 -0
  32. floodmodeller_api/test/test_data/example_h+_export.csv +77 -0
  33. floodmodeller_api/test/test_data/hplus_export_example_1.csv +72 -0
  34. floodmodeller_api/test/test_data/hplus_export_example_10.csv +77 -0
  35. floodmodeller_api/test/test_data/hplus_export_example_2.csv +79 -0
  36. floodmodeller_api/test/test_data/hplus_export_example_3.csv +77 -0
  37. floodmodeller_api/test/test_data/hplus_export_example_4.csv +131 -0
  38. floodmodeller_api/test/test_data/hplus_export_example_5.csv +77 -0
  39. floodmodeller_api/test/test_data/hplus_export_example_6.csv +131 -0
  40. floodmodeller_api/test/test_data/hplus_export_example_7.csv +131 -0
  41. floodmodeller_api/test/test_data/hplus_export_example_8.csv +131 -0
  42. floodmodeller_api/test/test_data/hplus_export_example_9.csv +131 -0
  43. floodmodeller_api/test/test_data/network_dat_expected.json +312 -210
  44. floodmodeller_api/test/test_data/network_ied_expected.json +6 -6
  45. floodmodeller_api/test/test_data/network_with_comments.ied +55 -0
  46. floodmodeller_api/test/test_flowtimeprofile.py +133 -0
  47. floodmodeller_api/test/test_hydrology_plus_export.py +210 -0
  48. floodmodeller_api/test/test_ied.py +12 -0
  49. floodmodeller_api/test/test_ief.py +49 -9
  50. floodmodeller_api/test/test_json.py +6 -1
  51. floodmodeller_api/test/test_read_file.py +27 -0
  52. floodmodeller_api/test/test_river.py +169 -0
  53. floodmodeller_api/to_from_json.py +7 -1
  54. floodmodeller_api/tool.py +6 -10
  55. floodmodeller_api/units/__init__.py +11 -1
  56. floodmodeller_api/units/conveyance.py +101 -212
  57. floodmodeller_api/units/sections.py +120 -39
  58. floodmodeller_api/util.py +2 -0
  59. floodmodeller_api/version.py +1 -1
  60. floodmodeller_api/xml2d.py +20 -13
  61. floodmodeller_api/xsd_backup.xml +738 -0
  62. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/METADATA +2 -1
  63. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/RECORD +67 -33
  64. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/WHEEL +1 -1
  65. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/LICENSE.txt +0 -0
  66. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/entry_points.txt +0 -0
  67. {floodmodeller_api-0.4.4.post1.dist-info → floodmodeller_api-0.5.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,333 @@
1
+ """
2
+ Flood Modeller Python API
3
+ Copyright (C) 2024 Jacobs U.K. Limited
4
+
5
+ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
6
+ as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
7
+
8
+ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
9
+ of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
10
+
11
+ You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
12
+
13
+ If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
14
+ address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ from pathlib import Path
20
+
21
+ import pandas as pd
22
+
23
+ from .._base import FMFile
24
+ from ..ief import IEF, FlowTimeProfile
25
+ from ..units import QTBDY
26
+ from ..util import handle_exception
27
+
28
+
29
class HydrologyPlusExport(FMFile):
    """Class to handle the exported output of Hydrology +

    Args:
        csv_file_path (str | Path): produced by Hydrology + in Flood Modeller

    Output:
        Initiates 'HydrologyPlusExport' object
        The event/s needed to run simulations in Flood Modeller
    """

    # FMFile hooks: file-type identifier and the suffix expected on disk.
    _filetype: str = "HydrologyPlusExport"
    _suffix: str = ".csv"
42
+
43
    @handle_exception(when="read")
    def __init__(self, csv_file_path: str | Path, from_json: bool = False):
        """Initialise from a Hydrology+ export csv.

        Args:
            csv_file_path (str | Path): Path to the csv produced by Hydrology+.
            from_json (bool): When True, skip reading from disk — the object
                is being rebuilt from a JSON serialisation instead.
        """
        if from_json:
            return
        FMFile.__init__(self, csv_file_path)
        self._read()
49
+
50
    def _read(self):
        """Validate the csv header, then parse metadata and hydrograph data."""
        with self._filepath.open("r") as file:
            # The first line identifies a genuine Hydrology+ export file;
            # trailing commas/newlines are stripped before comparison.
            header = file.readline().strip(" ,\n\r")
            if header != "Flood Modeller Hydrology+ hydrograph file":
                raise ValueError("Input file is not the correct format for Hydrology+ export data.")

        # Raw, un-split parse used to locate marker rows; the hydrograph
        # table itself is re-read with the correct offsets below.
        self._data_file = pd.read_csv(self._filepath)
        self._metadata = self._get_metadata()
        self._data = self._get_df_hydrographs_plus()
        self._get_unique_event_components()
60
+
61
+ def _get_metadata(self) -> dict[str, str]:
62
+ """Extracts the metada from the hydrology + results"""
63
+ metadata_row_index = self._data_file.index[self._data_file.iloc[:, 0] == "Return Period"][0]
64
+ metadata_df = self._data_file.iloc[:metadata_row_index, 0].tolist()
65
+
66
+ return dict([row.split("=") for row in metadata_df if isinstance(row, str)])
67
+
68
    def _get_df_hydrographs_plus(self) -> pd.DataFrame:
        """Extracts all the events generated in hydrology +"""
        # 0-based row (within the parsed DataFrame) just after the
        # "Time (hours)" marker — this is where the hydrograph table begins.
        self._time_row_index_from_df = (
            self._data_file.index[self._data_file.iloc[:, 0] == "Time (hours)"][0] + 1
        )
        # Corresponding row number in the raw csv file; the +2 presumably
        # accounts for the csv header line and 0-based -> 1-based conversion.
        # NOTE(review): confirm against FlowTimeProfile start_row semantics.
        self._time_row_index_from_csv = self._time_row_index_from_df + 2
        return pd.read_csv(self._filepath, skiprows=self._time_row_index_from_df, index_col=0)
75
+
76
+ def _get_event(
77
+ self,
78
+ event: str | None = None,
79
+ return_period: float | None = None,
80
+ storm_duration: float | None = None,
81
+ scenario: str | None = None,
82
+ ) -> str:
83
+ """Get exact column name based on event or individual params"""
84
+ if event:
85
+ return next(col for col in self.data.columns if col.lower().startswith(event.lower()))
86
+
87
+ if not (return_period and storm_duration and scenario):
88
+ raise ValueError(
89
+ "Missing required inputs to find event, if no event string is passed then a "
90
+ "return_period, storm_duration and scenario are needed. You provided: "
91
+ f"{return_period=}, {storm_duration=}, {scenario=}",
92
+ )
93
+ for column in self.data.columns:
94
+ s, sd, rp, *_ = column.split(" - ")
95
+ if s == scenario and float(sd) == storm_duration and float(rp) == return_period:
96
+ return column
97
+ else:
98
+ raise ValueError(
99
+ "No matching event was found based on "
100
+ f"{return_period=}, {storm_duration=}, {scenario=}",
101
+ )
102
+
103
+ def get_event_flow(
104
+ self,
105
+ event: str | None = None,
106
+ return_period: float | None = None,
107
+ storm_duration: float | None = None,
108
+ scenario: str | None = None,
109
+ ) -> pd.Series:
110
+ """Extracts a specific event's flow data from the exported Hydrology+ flow data.
111
+
112
+ Args:
113
+ event (str, optional): Full string identifier for the event in the dataset. If provided, this takes precedence over other parameters.
114
+ return_period (float, optional): The return period of the event.
115
+ storm_duration (float, optional): The duration of the storm event in hours.
116
+ scenario (str, optional): The scenario name, which typically relates to different conditions (e.g., climate change scenario).
117
+
118
+ Returns:
119
+ pd.Series: A pandas Series containing the flow data (m³/s) for the specified event.
120
+
121
+ Raises:
122
+ FloodModellerAPIError: If the csv file is not in the correct format.
123
+ ValueError: If the `event` arg is not provided and one or more of `return_period`, `storm_duration`, or `scenario` is missing.
124
+ ValueError: If no matching event is found in the dataset.
125
+
126
+ Note:
127
+ - If the `event` parameter is provided, the method returns the data corresponding to that event.
128
+ - If `event` is not provided, the method attempts to locate the event based on the combination of `return_period`, `storm_duration`, and `scenario`.
129
+ - The dataset is assumed to have columns named in the format "scenario - storm_duration - return_period - Flow (m3/s)".
130
+ """
131
+
132
+ column = self._get_event(event, return_period, storm_duration, scenario)
133
+ return self.data.loc[:, column]
134
+
135
+ def _get_unique_event_components(self):
136
+ return_periods, storm_durations, scenarios = set(), set(), set()
137
+ for column in self.data.columns:
138
+ s, sd, rp, *_ = column.split(" - ")
139
+ return_periods.add(float(rp))
140
+ storm_durations.add(float(sd))
141
+ scenarios.add(s)
142
+ self._return_periods = sorted(return_periods)
143
+ self._storm_durations = sorted(storm_durations)
144
+ self._scenarios = sorted(scenarios)
145
+
146
    @property
    def data(self) -> pd.DataFrame:
        "Hydrograph flow data for all events as a pandas DataFrame."
        return self._data

    @property
    def metadata(self) -> dict[str, str]:
        "Metadata associated with Hydrology+ csv export."
        return self._metadata

    @property
    def return_periods(self) -> list[float]:
        "Distinct return periods from exported Hydrology+ data"
        return self._return_periods

    @property
    def storm_durations(self) -> list[float]:
        "Distinct storm durations from exported Hydrology+ data"
        return self._storm_durations

    @property
    def scenarios(self) -> list[str]:
        "Distinct scenarios from exported Hydrology+ data"
        return self._scenarios
170
+
171
+ def _get_output_ief_path(self, event: str) -> Path:
172
+ column_output_name = event.replace("- Flow (m3/s)", "").replace(" ", "")
173
+ return self._filepath.with_name(f"{column_output_name}_generated.ief")
174
+
175
+ def generate_iefs(
176
+ self,
177
+ node_label: str,
178
+ template_ief: IEF | Path | str | None = None,
179
+ ) -> list[IEF]:
180
+ """Generates a set of IEF files for all available events in the Hydrology+ Export file.
181
+
182
+ The IEF files are saved to disk in the same location as the Hydrology+ Export file and are
183
+ named with the pattern {profile name}_generated.ief. They are also returned as a list of IEF
184
+ instances for further editing/saving if desired.
185
+
186
+ Args:
187
+ node_label (str): Node label in model network to associate flow data with.
188
+ template_ief (IEF | Path | str | None, optional): A template IEF instance, a file path, or
189
+ a string representing the path to an IEF. If not provided, a new blank IEF instance is created.
190
+
191
+ Returns:
192
+ list[IEF]: A list of IEF instances, one for each event.
193
+ """
194
+ if template_ief is None:
195
+ template_ief = IEF()
196
+
197
+ elif isinstance(template_ief, (Path, str)):
198
+ template_ief = IEF(template_ief)
199
+
200
+ generated_iefs = []
201
+ for column in self.data.columns:
202
+ generated_iefs.append(self.generate_ief(node_label, template_ief, event=column))
203
+
204
+ return generated_iefs
205
+
206
    def generate_ief(  # noqa: PLR0913
        self,
        node_label: str,
        template_ief: IEF | Path | str | None = None,
        event: str | None = None,
        return_period: float | None = None,
        storm_duration: float | None = None,
        scenario: str | None = None,
    ) -> IEF:
        """Generates a single IEF file for the requested event.

        The IEF file is saved to disk in the same location as the Hydrology+ Export file and is
        named with the pattern {profile name}_generated.ief. The IEF instance is also returned for
        further editing/saving if desired.

        Args:
            node_label (str): Node label in model network to associate flow data with.
            template_ief (IEF | Path | str | None, optional): A template IEF instance, a file path, or
                a string representing the path to an IEF. If not provided, a new blank IEF instance is created.
            event (str, optional): Full string identifier for the event in the dataset. If provided, this takes precedence over other parameters.
            return_period (float, optional): The return period of the event.
            storm_duration (float, optional): The duration of the storm event in hours.
            scenario (str, optional): The scenario name, which typically relates to different conditions (e.g., climate change scenario).

        Returns:
            IEF: An IEF instance.
        """
        _template_ief: IEF
        if template_ief is None:
            _template_ief = IEF()

        elif isinstance(template_ief, (Path, str)):
            _template_ief = IEF(template_ief)

        else:
            _template_ief = template_ief

        flowtimeprofile = self.get_flowtimeprofile(
            node_label,
            event,
            return_period,
            storm_duration,
            scenario,
        )
        # Temporarily attach the profile so it is written out with the template.
        _template_ief.flowtimeprofiles.append(flowtimeprofile)
        output_ief_path = self._get_output_ief_path(flowtimeprofile.profile)
        _template_ief.save(output_ief_path)
        # Re-read the saved file so the returned IEF is independent of the template.
        generated_ief = IEF(output_ief_path)
        # Restore the template so repeated calls don't accumulate profiles.
        _template_ief.flowtimeprofiles = _template_ief.flowtimeprofiles[:-1]

        return generated_ief
257
+
258
    def get_flowtimeprofile(
        self,
        node_label: str,
        event: str | None = None,
        return_period: float | None = None,
        storm_duration: float | None = None,
        scenario: str | None = None,
    ) -> FlowTimeProfile:
        """Generates a FlowTimeProfile object based on the requested event.

        Args:
            node_label (str): Node label in model network to associate flow data with.
            event (str, optional): Full string identifier for the event in the dataset. If provided, this takes precedence over other parameters.
            return_period (float, optional): The return period of the event.
            storm_duration (float, optional): The duration of the storm event in hours.
            scenario (str, optional): The scenario name, which typically relates to different conditions (e.g., climate change scenario).

        Returns:
            FlowTimeProfile: A FlowTimeProfile object containing the attributes required for an IEF.

        Raises:
            FloodModellerAPIError: If the csv file is not in the correct format.
            ValueError: If the `event` arg is not provided and one or more of `return_period`, `storm_duration`, or `scenario` is missing.
            ValueError: If no matching event is found in the dataset.

        Note:
            - If the `event` parameter is provided, the method returns the data corresponding to that event.
            - If `event` is not provided, the method attempts to locate the event based on the combination of `return_period`, `storm_duration`, and `scenario`.
            - The dataset is assumed to have columns named in the format "scenario - storm_duration - return_period - Flow (m3/s)".
        """
        column = self._get_event(event, return_period, storm_duration, scenario)
        index = list(self.data.columns).index(column)
        return FlowTimeProfile(
            labels=[node_label],
            # +2 presumably converts the 0-based position among flow columns
            # to a 1-based csv column number past the time column.
            # NOTE(review): confirm against FlowTimeProfile column semantics.
            columns=[index + 2],
            start_row=self._time_row_index_from_csv,
            csv_filepath=self._filepath.name,
            file_type="hplus",
            profile=column,
            comment="Generated by HydrologyPlusExport",
        )
299
+
300
+ def get_qtbdy(
301
+ self,
302
+ qtbdy_name: str | None,
303
+ event: str | None = None,
304
+ return_period: float | None = None,
305
+ storm_duration: float | None = None,
306
+ scenario: str | None = None,
307
+ **kwargs,
308
+ ) -> QTBDY:
309
+ """Generates a QTBDY unit based on the flow time series of the requested event.
310
+
311
+ Args:
312
+ qtbdy_name (str, optional): Name of the new QTBDY unit. If not provided a default name is used.
313
+ event (str, optional): Full string identifier for the event in the dataset. If provided, this takes precedence over other parameters.
314
+ return_period (float, optional): The return period of the event.
315
+ storm_duration (float, optional): The duration of the storm event in hours.
316
+ scenario (str, optional): The scenario name, which typically relates to different conditions (e.g., climate change scenario).
317
+ **kwargs: Additional keyword args can be passed to build the QTBDY unit. See :class:`~floodmodeller_api.units.QTBDY` for details.
318
+
319
+ Returns:
320
+ QTBDY: A QTBDY object containing the flow data (m³/s) for the specified event.
321
+
322
+ Raises:
323
+ FloodModellerAPIError: If the csv file is not in the correct format.
324
+ ValueError: If the `event` arg is not provided and one or more of `return_period`, `storm_duration`, or `scenario` is missing.
325
+ ValueError: If no matching event is found in the dataset.
326
+
327
+ Note:
328
+ - If the `event` parameter is provided, the method returns the data corresponding to that event.
329
+ - If `event` is not provided, the method attempts to locate the event based on the combination of `return_period`, `storm_duration`, and `scenario`.
330
+ - The dataset is assumed to have columns named in the format "scenario - storm_duration - return_period - Flow (m3/s)".
331
+ """
332
+ flow_data = self.get_event_flow(event, return_period, storm_duration, scenario)
333
+ return QTBDY(name=qtbdy_name, data=flow_data, **kwargs)
floodmodeller_api/ied.py CHANGED
@@ -67,8 +67,34 @@ class IED(FMFile):
67
67
  self._update_ied_struct()
68
68
 
69
69
    @handle_exception(when="write")
    def _write(self) -> str:
        """Returns string representation of the current IED data"""
        # Rewrite the raw text from the unit objects, then refresh the block
        # structure and label keys so they stay in sync with the new text.
        self._write_raw_data()
        self._update_ied_struct()
        self._update_unit_names()

        return "\n".join(self._raw_data) + "\n"
77
+
78
+ def _update_unit_names(self) -> None:
79
+ for unit_group, unit_group_name in [
80
+ (self.boundaries, "boundaries"),
81
+ (self.sections, "sections"),
82
+ (self.structures, "structures"),
83
+ (self.conduits, "conduits"),
84
+ (self.losses, "losses"),
85
+ ]:
86
+ for name, unit in unit_group.copy().items():
87
+ if name != unit.name:
88
+ # Check if new name already exists as a label
89
+ if unit.name in unit_group:
90
+ raise Exception(
91
+ f'Error: Cannot update label "{name}" to "{unit.name}" because '
92
+ f'"{unit.name}" already exists in the Network {unit_group_name} group',
93
+ )
94
+ unit_group[unit.name] = unit
95
+ del unit_group[name]
96
+
97
+ def _write_raw_data(self) -> None:
72
98
  block_shift = 0
73
99
  existing_units: dict[str, list[str]] = {
74
100
  "boundaries": [],
@@ -77,6 +103,8 @@ class IED(FMFile):
77
103
  "conduits": [],
78
104
  "losses": [],
79
105
  }
106
+ comment_tracker = 0
107
+ comment_units = [unit for unit in self._all_units if unit._unit == "COMMENT"]
80
108
 
81
109
  for block in self._ied_struct:
82
110
  # Check for all supported boundary types
@@ -85,22 +113,29 @@ class IED(FMFile):
85
113
  block["start"] + block_shift : block["end"] + 1 + block_shift
86
114
  ]
87
115
  prev_block_len = len(unit_data)
88
- if units.SUPPORTED_UNIT_TYPES[block["Type"]]["has_subtype"]:
89
- unit_name = unit_data[2][:12].strip()
90
- else:
91
- unit_name = unit_data[1][:12].strip()
92
116
 
93
- # Get unit object
94
- unit_group = getattr(self, units.SUPPORTED_UNIT_TYPES[block["Type"]]["group"])
95
- if unit_name in unit_group:
96
- # block still exists
97
- new_unit_data = unit_group[unit_name]._write()
98
- existing_units[units.SUPPORTED_UNIT_TYPES[block["Type"]]["group"]].append(
99
- unit_name,
100
- )
117
+ if block["Type"] == "COMMENT":
118
+ comment = comment_units[comment_tracker]
119
+ new_unit_data = comment._write()
120
+ comment_tracker += 1
121
+
101
122
  else:
102
- # Bdy block has been deleted
103
- new_unit_data = []
123
+ if units.SUPPORTED_UNIT_TYPES[block["Type"]]["has_subtype"]:
124
+ unit_name = unit_data[2][:12].strip()
125
+ else:
126
+ unit_name = unit_data[1][:12].strip()
127
+
128
+ # Get unit object
129
+ unit_group = getattr(self, units.SUPPORTED_UNIT_TYPES[block["Type"]]["group"])
130
+ if unit_name in unit_group:
131
+ # block still exists
132
+ new_unit_data = unit_group[unit_name]._write()
133
+ existing_units[units.SUPPORTED_UNIT_TYPES[block["Type"]]["group"]].append(
134
+ unit_name,
135
+ )
136
+ else:
137
+ # Bdy block has been deleted
138
+ new_unit_data = []
104
139
 
105
140
  new_block_len = len(new_unit_data)
106
141
  self._raw_data[block["start"] + block_shift : block["end"] + 1 + block_shift] = (
@@ -117,29 +152,6 @@ class IED(FMFile):
117
152
  # Ensure that the 'name' attribute matches name key in boundaries
118
153
  self._raw_data.extend(unit._write())
119
154
 
120
- # Update ied_struct
121
- self._update_ied_struct()
122
-
123
- # Update unit names
124
- for unit_group, unit_group_name in [
125
- (self.boundaries, "boundaries"),
126
- (self.sections, "sections"),
127
- (self.structures, "structures"),
128
- (self.conduits, "conduits"),
129
- (self.losses, "losses"),
130
- ]:
131
- for name, unit in unit_group.copy().items():
132
- if name != unit.name:
133
- # Check if new name already exists as a label
134
- if unit.name in unit_group:
135
- raise Exception(
136
- f'Error: Cannot update label "{name}" to "{unit.name}" because "{unit.name}" already exists in the Network {unit_group_name} group',
137
- )
138
- unit_group[unit.name] = unit
139
- del unit_group[name]
140
-
141
- return "\n".join(self._raw_data) + "\n"
142
-
143
155
  def _get_unit_definitions(self):
144
156
  # Get unit definitions
145
157
  self.sections = {}
@@ -153,6 +165,11 @@ class IED(FMFile):
153
165
  unit_data = self._raw_data[block["start"] : block["end"] + 1]
154
166
  # Check for all supported boundary types, starting just with QTBDY type
155
167
  if block["Type"] in units.SUPPORTED_UNIT_TYPES:
168
+ # Handle comments
169
+ if block["Type"] == "COMMENT":
170
+ self._all_units.append(units.COMMENT(unit_data, n=12))
171
+ continue
172
+
156
173
  # Check to see whether unit type has associated subtypes so that unit name can be correctly assigned
157
174
  if units.SUPPORTED_UNIT_TYPES[block["Type"]]["has_subtype"]:
158
175
  # Takes first 12 characters as name
@@ -191,7 +208,7 @@ class IED(FMFile):
191
208
 
192
209
  print()
193
210
 
194
- def _update_ied_struct(self): # noqa: C901, PLR0912, PLR0915
211
+ def _update_ied_struct(self): # noqa: C901, PLR0912
195
212
  # Generate IED structure
196
213
  ied_struct = []
197
214
  in_block = False
@@ -199,74 +216,60 @@ class IED(FMFile):
199
216
  comment_n = None
200
217
  in_comment = False
201
218
 
219
+ def _finalise_block(block: dict, struct: list, end: int) -> list:
220
+ block["end"] = end
221
+ struct.append(block)
222
+ return struct
223
+
202
224
  for idx, line in enumerate(self._raw_data):
225
+ split_line = line.split(" ")
226
+
203
227
  # Deal with comment blocks explicitly as they could contain unit keywords
204
- if in_comment and comment_n is None:
205
- comment_n = int(line.strip())
206
- continue
207
228
  if in_comment:
229
+ if comment_n is None:
230
+ comment_n = int(line.strip())
231
+ continue
232
+
208
233
  comment_n -= 1
209
- if comment_n == 0:
210
- bdy_block["end"] = idx # add ending index
211
- # append existing bdy block to the ied_struct
212
- ied_struct.append(bdy_block)
213
- bdy_block = {} # reset bdy block
214
- in_comment = False
215
- in_block = False
216
- comment_n = None
217
- continue # move onto next line as still in comment block
234
+ if comment_n != 0:
235
+ continue
236
+
237
+ ied_struct = _finalise_block(bdy_block, ied_struct, idx)
238
+ bdy_block = {}
239
+ in_comment = False
240
+ in_block = False
241
+ comment_n = None
242
+ continue
218
243
 
219
244
  if line == "COMMENT":
220
245
  in_comment = True
221
- if in_block is True:
222
- bdy_block["end"] = idx - 1 # add ending index
223
- # append existing bdy block to the ied_struct
224
- ied_struct.append(bdy_block)
225
- bdy_block = {} # reset bdy block
226
- # start new block for COMMENT
227
- bdy_block["Type"] = line.split(" ")[0]
228
- bdy_block["start"] = idx # add starting index
229
- continue
246
+ unit_type = line
247
+
248
+ elif len(split_line[0]) > 1:
249
+ if split_line[0] in units.ALL_UNIT_TYPES:
250
+ unit_type = split_line[0]
251
+
252
+ elif " ".join(split_line[:2]) in units.ALL_UNIT_TYPES:
253
+ unit_type = " ".join(split_line[:2])
230
254
 
231
- if len(line.split(" ")[0]) > 1:
232
- if line.split(" ")[0] in units.ALL_UNIT_TYPES:
233
- if in_block is True:
234
- bdy_block["end"] = idx - 1 # add ending index
235
- # append existing bdy block to the ief_struct
236
- ied_struct.append(bdy_block)
237
- bdy_block = {} # reset bdy block
238
- in_block = True
239
- bdy_block["Type"] = line.split(" ")[0] # start new bdy block
240
- bdy_block["start"] = idx # add starting index
241
-
242
- elif " ".join(line.split(" ")[:2]) in units.ALL_UNIT_TYPES:
243
- if in_block is True:
244
- bdy_block["end"] = idx - 1 # add ending index
245
- # append existing bdy block to the ief_struct
246
- ied_struct.append(bdy_block)
247
- bdy_block = {} # reset bdy block
248
- in_block = True
249
- bdy_block["Type"] = " ".join(line.split(" ")[:2]) # start new bdy block
250
- bdy_block["start"] = idx # add starting index
251
255
  else:
252
256
  continue
257
+
253
258
  elif line in units.ALL_UNIT_TYPES:
254
- if in_block is True:
255
- bdy_block["end"] = idx - 1 # add ending index
256
- # append existing bdy block to the ief_struct
257
- ied_struct.append(bdy_block)
258
- bdy_block = {} # reset bdy block
259
- in_block = True
260
- bdy_block["Type"] = line # start new bdy block
261
- bdy_block["start"] = idx # add starting index
259
+ unit_type = line
260
+
262
261
  else:
263
262
  continue
264
263
 
264
+ if in_block is True:
265
+ ied_struct = _finalise_block(bdy_block, ied_struct, idx - 1)
266
+ bdy_block = {"Type": unit_type, "start": idx}
267
+ in_block = True
268
+
265
269
  if len(bdy_block) != 0:
266
270
  # Only adds end block if there is a bdy block present (i.e. an empty IED stays empty)
267
271
  # add ending index for final block
268
- bdy_block["end"] = len(self._raw_data) - 1
269
- ied_struct.append(bdy_block) # add final block
272
+ ied_struct = _finalise_block(bdy_block, ied_struct, len(self._raw_data) - 1)
270
273
 
271
274
  self._ied_struct = ied_struct
272
275