flood-adapt 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139) hide show
  1. flood_adapt/__init__.py +22 -0
  2. flood_adapt/adapter/__init__.py +9 -0
  3. flood_adapt/adapter/fiat_adapter.py +1502 -0
  4. flood_adapt/adapter/interface/__init__.py +0 -0
  5. flood_adapt/adapter/interface/hazard_adapter.py +70 -0
  6. flood_adapt/adapter/interface/impact_adapter.py +36 -0
  7. flood_adapt/adapter/interface/model_adapter.py +89 -0
  8. flood_adapt/adapter/interface/offshore.py +19 -0
  9. flood_adapt/adapter/sfincs_adapter.py +1857 -0
  10. flood_adapt/adapter/sfincs_offshore.py +193 -0
  11. flood_adapt/config/__init__.py +0 -0
  12. flood_adapt/config/config.py +245 -0
  13. flood_adapt/config/fiat.py +219 -0
  14. flood_adapt/config/gui.py +224 -0
  15. flood_adapt/config/sfincs.py +336 -0
  16. flood_adapt/config/site.py +124 -0
  17. flood_adapt/database_builder/__init__.py +0 -0
  18. flood_adapt/database_builder/database_builder.py +2175 -0
  19. flood_adapt/database_builder/templates/default_units/imperial.toml +9 -0
  20. flood_adapt/database_builder/templates/default_units/metric.toml +9 -0
  21. flood_adapt/database_builder/templates/green_infra_table/green_infra_lookup_table.csv +10 -0
  22. flood_adapt/database_builder/templates/icons/black_down_48x48.png +0 -0
  23. flood_adapt/database_builder/templates/icons/black_left_48x48.png +0 -0
  24. flood_adapt/database_builder/templates/icons/black_right_48x48.png +0 -0
  25. flood_adapt/database_builder/templates/icons/black_up_48x48.png +0 -0
  26. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-16_white_down.png +0 -0
  27. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-16_white_left.png +0 -0
  28. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-16_white_right.png +0 -0
  29. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-16_white_up.png +0 -0
  30. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-24_black_down.png +0 -0
  31. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-24_black_left.png +0 -0
  32. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-24_black_right.png +0 -0
  33. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-24_black_up.png +0 -0
  34. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-24_white_left.png +0 -0
  35. flood_adapt/database_builder/templates/icons/icons8-triangle-arrow-24_white_right.png +0 -0
  36. flood_adapt/database_builder/templates/icons/white_down_48x48.png +0 -0
  37. flood_adapt/database_builder/templates/icons/white_left_48x48.png +0 -0
  38. flood_adapt/database_builder/templates/icons/white_right_48x48.png +0 -0
  39. flood_adapt/database_builder/templates/icons/white_up_48x48.png +0 -0
  40. flood_adapt/database_builder/templates/infographics/OSM/config_charts.toml +90 -0
  41. flood_adapt/database_builder/templates/infographics/OSM/config_people.toml +57 -0
  42. flood_adapt/database_builder/templates/infographics/OSM/config_risk_charts.toml +121 -0
  43. flood_adapt/database_builder/templates/infographics/OSM/config_roads.toml +65 -0
  44. flood_adapt/database_builder/templates/infographics/OSM/styles.css +45 -0
  45. flood_adapt/database_builder/templates/infographics/US_NSI/config_charts.toml +126 -0
  46. flood_adapt/database_builder/templates/infographics/US_NSI/config_people.toml +60 -0
  47. flood_adapt/database_builder/templates/infographics/US_NSI/config_risk_charts.toml +121 -0
  48. flood_adapt/database_builder/templates/infographics/US_NSI/config_roads.toml +65 -0
  49. flood_adapt/database_builder/templates/infographics/US_NSI/styles.css +45 -0
  50. flood_adapt/database_builder/templates/infographics/images/ambulance.png +0 -0
  51. flood_adapt/database_builder/templates/infographics/images/car.png +0 -0
  52. flood_adapt/database_builder/templates/infographics/images/cart.png +0 -0
  53. flood_adapt/database_builder/templates/infographics/images/firetruck.png +0 -0
  54. flood_adapt/database_builder/templates/infographics/images/hospital.png +0 -0
  55. flood_adapt/database_builder/templates/infographics/images/house.png +0 -0
  56. flood_adapt/database_builder/templates/infographics/images/info.png +0 -0
  57. flood_adapt/database_builder/templates/infographics/images/money.png +0 -0
  58. flood_adapt/database_builder/templates/infographics/images/person.png +0 -0
  59. flood_adapt/database_builder/templates/infographics/images/school.png +0 -0
  60. flood_adapt/database_builder/templates/infographics/images/truck.png +0 -0
  61. flood_adapt/database_builder/templates/infographics/images/walking_person.png +0 -0
  62. flood_adapt/database_builder/templates/infometrics/OSM/metrics_additional_risk_configs.toml +4 -0
  63. flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config.toml +143 -0
  64. flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config_risk.toml +153 -0
  65. flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config.toml +127 -0
  66. flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config_risk.toml +57 -0
  67. flood_adapt/database_builder/templates/infometrics/US_NSI/metrics_additional_risk_configs.toml +4 -0
  68. flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config.toml +191 -0
  69. flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config_risk.toml +153 -0
  70. flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config.toml +178 -0
  71. flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config_risk.toml +57 -0
  72. flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config.toml +9 -0
  73. flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config_risk.toml +65 -0
  74. flood_adapt/database_builder/templates/mapbox_layers/bin_colors.toml +5 -0
  75. flood_adapt/database_builder.py +16 -0
  76. flood_adapt/dbs_classes/__init__.py +21 -0
  77. flood_adapt/dbs_classes/database.py +716 -0
  78. flood_adapt/dbs_classes/dbs_benefit.py +97 -0
  79. flood_adapt/dbs_classes/dbs_event.py +91 -0
  80. flood_adapt/dbs_classes/dbs_measure.py +103 -0
  81. flood_adapt/dbs_classes/dbs_projection.py +52 -0
  82. flood_adapt/dbs_classes/dbs_scenario.py +150 -0
  83. flood_adapt/dbs_classes/dbs_static.py +261 -0
  84. flood_adapt/dbs_classes/dbs_strategy.py +147 -0
  85. flood_adapt/dbs_classes/dbs_template.py +302 -0
  86. flood_adapt/dbs_classes/interface/database.py +147 -0
  87. flood_adapt/dbs_classes/interface/element.py +137 -0
  88. flood_adapt/dbs_classes/interface/static.py +47 -0
  89. flood_adapt/flood_adapt.py +1371 -0
  90. flood_adapt/misc/__init__.py +0 -0
  91. flood_adapt/misc/database_user.py +16 -0
  92. flood_adapt/misc/log.py +183 -0
  93. flood_adapt/misc/path_builder.py +54 -0
  94. flood_adapt/misc/utils.py +185 -0
  95. flood_adapt/objects/__init__.py +59 -0
  96. flood_adapt/objects/benefits/__init__.py +0 -0
  97. flood_adapt/objects/benefits/benefits.py +61 -0
  98. flood_adapt/objects/events/__init__.py +0 -0
  99. flood_adapt/objects/events/event_factory.py +135 -0
  100. flood_adapt/objects/events/event_set.py +84 -0
  101. flood_adapt/objects/events/events.py +221 -0
  102. flood_adapt/objects/events/historical.py +55 -0
  103. flood_adapt/objects/events/hurricane.py +64 -0
  104. flood_adapt/objects/events/synthetic.py +48 -0
  105. flood_adapt/objects/forcing/__init__.py +0 -0
  106. flood_adapt/objects/forcing/csv.py +68 -0
  107. flood_adapt/objects/forcing/discharge.py +66 -0
  108. flood_adapt/objects/forcing/forcing.py +142 -0
  109. flood_adapt/objects/forcing/forcing_factory.py +182 -0
  110. flood_adapt/objects/forcing/meteo_handler.py +93 -0
  111. flood_adapt/objects/forcing/netcdf.py +40 -0
  112. flood_adapt/objects/forcing/plotting.py +428 -0
  113. flood_adapt/objects/forcing/rainfall.py +98 -0
  114. flood_adapt/objects/forcing/tide_gauge.py +191 -0
  115. flood_adapt/objects/forcing/time_frame.py +77 -0
  116. flood_adapt/objects/forcing/timeseries.py +552 -0
  117. flood_adapt/objects/forcing/unit_system.py +580 -0
  118. flood_adapt/objects/forcing/waterlevels.py +108 -0
  119. flood_adapt/objects/forcing/wind.py +124 -0
  120. flood_adapt/objects/measures/__init__.py +0 -0
  121. flood_adapt/objects/measures/measure_factory.py +92 -0
  122. flood_adapt/objects/measures/measures.py +506 -0
  123. flood_adapt/objects/object_model.py +68 -0
  124. flood_adapt/objects/projections/__init__.py +0 -0
  125. flood_adapt/objects/projections/projections.py +89 -0
  126. flood_adapt/objects/scenarios/__init__.py +0 -0
  127. flood_adapt/objects/scenarios/scenarios.py +22 -0
  128. flood_adapt/objects/strategies/__init__.py +0 -0
  129. flood_adapt/objects/strategies/strategies.py +68 -0
  130. flood_adapt/workflows/__init__.py +0 -0
  131. flood_adapt/workflows/benefit_runner.py +541 -0
  132. flood_adapt/workflows/floodmap.py +85 -0
  133. flood_adapt/workflows/impacts_integrator.py +82 -0
  134. flood_adapt/workflows/scenario_runner.py +69 -0
  135. flood_adapt-0.3.0.dist-info/LICENSE +21 -0
  136. flood_adapt-0.3.0.dist-info/METADATA +183 -0
  137. flood_adapt-0.3.0.dist-info/RECORD +139 -0
  138. flood_adapt-0.3.0.dist-info/WHEEL +5 -0
  139. flood_adapt-0.3.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1502 @@
1
+ import logging
2
+ import math
3
+ import os
4
+ import shutil
5
+ import subprocess
6
+ from pathlib import Path
7
+ from typing import Any, Optional, Union
8
+
9
+ import geopandas as gpd
10
+ import pandas as pd
11
+ import tomli
12
+ from fiat_toolbox import FiatColumns, get_fiat_columns
13
+ from fiat_toolbox.equity.equity import Equity
14
+ from fiat_toolbox.infographics.infographics_factory import InforgraphicFactory
15
+ from fiat_toolbox.metrics_writer.fiat_write_metrics_file import MetricsFileWriter
16
+ from fiat_toolbox.metrics_writer.fiat_write_return_period_threshold import (
17
+ ExceedanceProbabilityCalculator,
18
+ )
19
+ from fiat_toolbox.spatial_output.aggregation_areas import AggregationAreas
20
+ from fiat_toolbox.spatial_output.footprints import Footprints
21
+ from fiat_toolbox.utils import extract_variables, matches_pattern, replace_pattern
22
+ from hydromt_fiat.fiat import FiatModel
23
+
24
+ from flood_adapt.adapter.interface.impact_adapter import IImpactAdapter
25
+ from flood_adapt.config.fiat import FiatConfigModel
26
+ from flood_adapt.misc.log import FloodAdaptLogging
27
+ from flood_adapt.misc.path_builder import (
28
+ ObjectDir,
29
+ )
30
+ from flood_adapt.misc.utils import cd, resolve_filepath
31
+ from flood_adapt.objects.events.events import Mode
32
+ from flood_adapt.objects.forcing import unit_system as us
33
+ from flood_adapt.objects.measures.measures import (
34
+ Buyout,
35
+ Elevate,
36
+ FloodProof,
37
+ Measure,
38
+ MeasureType,
39
+ )
40
+ from flood_adapt.objects.projections.projections import Projection
41
+ from flood_adapt.objects.scenarios.scenarios import Scenario
42
+ from flood_adapt.workflows.floodmap import FloodMap, FloodmapType
43
+ from flood_adapt.workflows.impacts_integrator import Impacts
44
+
45
# Define naming structure for saved files.
# These are the column-name patterns used in FloodAdapt's impact output
# (placeholders like {name} and {years} are filled per damage type / return period).
_IMPACT_COLUMNS = FiatColumns(
    object_id="Object ID",
    object_name="Object Name",
    primary_object_type="Primary Object Type",
    secondary_object_type="Secondary Object Type",
    extraction_method="Extraction Method",
    ground_floor_height="Ground Floor Height",
    ground_elevation="Ground Elevation",
    damage_function="Damage Function: {name}",
    max_potential_damage="Max Potential Damage: {name}",
    aggregation_label="Aggregation Label: {name}",
    inundation_depth="Inundation Depth",
    inundation_depth_rp="Inundation Depth ({years}Y)",
    reduction_factor="Reduction Factor",
    reduction_factor_rp="Reduction Factor ({years}Y)",
    damage="Damage: {name}",
    damage_rp="Damage: {name} ({years}Y)",
    total_damage="Total Damage",
    total_damage_rp="Total Damage ({years}Y)",
    risk_ead="Risk (EAD)",
    segment_length="Segment Length",
)

# Define column naming of FIAT model
_FIAT_COLUMNS: FiatColumns = get_fiat_columns(
    fiat_version="0.2.1"
)  # columns of FIAT # TODO add version from config
73
+
74
+
75
class FiatAdapter(IImpactAdapter):
    """
    ImpactAdapter for Delft-FIAT.

    It includes:
    - preprocessing methods for adding measures, projections and hazards
    - executing method for running a Delft-FIAT simulation
    - postprocessing methods for saving impact results
    """

    # TODO deal with all the relative paths for the files used
    # TODO IImpactAdapter and general Adapter class should NOT use the database

    _model: FiatModel  # hydroMT-FIAT model
    config: Optional[FiatConfigModel] = None  # FloodAdapt FIAT configuration
    exe_path: Optional[os.PathLike] = None  # path to the Delft-FIAT executable
    fiat_columns: FiatColumns  # column naming used by the FIAT model itself
    impact_columns: FiatColumns  # column naming used in FloodAdapt impact output
+
94
    def __init__(
        self,
        model_root: Path,
        config: Optional[FiatConfigModel] = None,
        exe_path: Optional[os.PathLike] = None,
        delete_crashed_runs: bool = True,
        config_base_path: Optional[os.PathLike] = None,
    ) -> None:
        """Load the Delft-FIAT template model at ``model_root`` in read mode.

        Parameters
        ----------
        model_root : Path
            Root directory of the (template) FIAT model to read.
        config : Optional[FiatConfigModel]
            FloodAdapt FIAT configuration (aggregations, infographics, ...).
        exe_path : Optional[os.PathLike]
            Path to the Delft-FIAT executable, used by :meth:`execute`.
        delete_crashed_runs : bool
            If True, :meth:`execute` removes non-log files after a failed run.
        config_base_path : Optional[os.PathLike]
            Base path for resolving relative paths referenced by the config.
        """
        # TODO should exe_path and delete_crashed_runs be part of the config?
        # Load FIAT template
        self.logger = FloodAdaptLogging.getLogger("FiatAdapter")
        self.config = config
        self.config_base_path = config_base_path
        self.exe_path = exe_path
        self.delete_crashed_runs = delete_crashed_runs
        self._model = FiatModel(root=str(model_root.resolve()), mode="r")
        self._model.read()
        self.fiat_columns = _FIAT_COLUMNS  # column naming used by Delft-FIAT
        self.impact_columns = _IMPACT_COLUMNS  # columns of FA impact output
113
+
114
    @property
    def model_root(self) -> Path:
        """Root directory of the underlying hydroMT-FIAT model."""
        return Path(self._model.root)
117
+
118
+ @property
119
+ def damage_types(self):
120
+ """Get the damage types that are present in the exposure."""
121
+ types = []
122
+ for col in self._model.exposure.exposure_db.columns:
123
+ if matches_pattern(col, self.fiat_columns.damage_function):
124
+ name = extract_variables(col, self.fiat_columns.damage_function)["name"]
125
+ types.append(name)
126
+ return types
127
+
128
+ def read(self, path: Path) -> None:
129
+ """Read the fiat model from the current model root."""
130
+ if Path(self._model.root).resolve() != Path(path).resolve():
131
+ self._model.set_root(root=str(path), mode="r")
132
+ self._model.read()
133
+
134
+ def write(self, path_out: Union[str, os.PathLike], overwrite: bool = True) -> None:
135
+ """Write the fiat model configuration to a directory."""
136
+ if not isinstance(path_out, Path):
137
+ path_out = Path(path_out).resolve()
138
+
139
+ if not path_out.exists():
140
+ path_out.mkdir(parents=True)
141
+
142
+ write_mode = "w+" if overwrite else "w"
143
+ with cd(path_out):
144
+ self._model.set_root(root=str(path_out), mode=write_mode)
145
+ self._model.write()
146
+
147
+ def close_files(self):
148
+ """Close all open files and clean up file handles."""
149
+ if hasattr(self.logger, "handlers"):
150
+ for handler in self.logger.handlers:
151
+ if isinstance(handler, logging.FileHandler):
152
+ handler.close()
153
+ self.logger.removeHandler(handler)
154
+
155
    def __enter__(self) -> "FiatAdapter":
        """Enter the context manager, returning this adapter."""
        return self
157
+
158
    def __exit__(self, exc_type, exc_value, traceback) -> bool:
        """Exit the context manager: release log file handles.

        Returns False so any exception raised in the ``with`` body propagates.
        """
        self.close_files()
        return False
161
+
162
+ def has_run(self, scenario: Scenario) -> bool:
163
+ # TODO this should include a check for all output files , and then maybe save them as output paths and types
164
+ """
165
+ Check if the impact results file for the given scenario exists.
166
+
167
+ Parameters
168
+ ----------
169
+ scenario : Scenario
170
+ The scenario for which to check the FIAT results.
171
+
172
+ Returns
173
+ -------
174
+ bool
175
+ True if the FIAT results file exists, False otherwise.
176
+ """
177
+ impacts_path = Impacts(scenario=scenario).impacts_path
178
+
179
+ fiat_results_path = impacts_path.joinpath(
180
+ f"Impacts_detailed_{scenario.name}.csv"
181
+ )
182
+ return fiat_results_path.exists()
183
+
184
+ def delete_model(self):
185
+ """
186
+ Delete the Delft-FIAT simulation folder.
187
+
188
+ This method attempts to delete the directory specified by `self.model_root`.
189
+
190
+ Raises
191
+ ------
192
+ OSError: If the directory cannot be deleted.
193
+ """
194
+ self.logger.info("Deleting Delft-FIAT simulation folder")
195
+ try:
196
+ shutil.rmtree(self.model_root)
197
+ except OSError as e_info:
198
+ self.logger.warning(f"{e_info}\nCould not delete {self.model_root}.")
199
+
200
+ def fiat_completed(self) -> bool:
201
+ """Check if fiat has run as expected.
202
+
203
+ Returns
204
+ -------
205
+ boolean
206
+ True if fiat has run, False if something went wrong
207
+ """
208
+ log_file = self.model_root.joinpath(
209
+ self._model.config["output"]["path"], "fiat.log"
210
+ )
211
+ if not log_file.exists():
212
+ return False
213
+ try:
214
+ with open(log_file, "r", encoding="cp1252") as f:
215
+ return "Geom calculation are done!" in f.read()
216
+ except Exception as e:
217
+ self.logger.error(f"Error while checking if FIAT has run: {e}")
218
+ return False
219
+
220
+ def preprocess(self, scenario: Scenario) -> None:
221
+ """
222
+ Preprocess the FIAT-model given a scenario by setting up projections, measures, and hazards, and then saves any changes made to disk.
223
+
224
+ Args:
225
+ scenario (Scenario): The scenario to preprocess, which includes projection,
226
+ strategy, and hazard.
227
+
228
+ Returns
229
+ -------
230
+ None
231
+ """
232
+ self.logger.info("Pre-processing Delft-FIAT model")
233
+ # Projection
234
+ projection = self.database.projections.get(scenario.projection)
235
+ self.add_projection(projection)
236
+
237
+ # Measures
238
+ strategy = self.database.strategies.get(scenario.strategy)
239
+ for measure in strategy.get_impact_measures():
240
+ self.add_measure(measure)
241
+
242
+ # Hazard
243
+ floodmap = FloodMap(scenario.name)
244
+ var = "risk_maps" if floodmap.mode == Mode.risk else "zsmax"
245
+ is_risk = floodmap.mode == Mode.risk
246
+ self.set_hazard(
247
+ map_fn=floodmap.path,
248
+ map_type=floodmap.type,
249
+ var=var,
250
+ is_risk=is_risk,
251
+ units=us.UnitTypesLength.meters,
252
+ )
253
+
254
+ # Save any changes made to disk as well
255
+ output_path = Impacts(scenario).impacts_path / "fiat_model"
256
+ self.write(path_out=output_path)
257
+
258
+ def run(self, scenario) -> None:
259
+ """
260
+ Execute the full process for a given scenario, including preprocessing, executing the simulation, and postprocessing steps.
261
+
262
+ Args:
263
+ scenario: An object containing the scenario data.
264
+
265
+ Returns
266
+ -------
267
+ None
268
+ """
269
+ sim_path = Impacts(scenario=scenario).impacts_path / "fiat_model"
270
+
271
+ self.preprocess(scenario)
272
+ self.execute(sim_path)
273
+ self.postprocess(scenario)
274
+
275
+ def execute(
276
+ self,
277
+ path: Optional[os.PathLike] = None,
278
+ exe_path: Optional[os.PathLike] = None,
279
+ delete_crashed_runs: Optional[bool] = None,
280
+ strict=True,
281
+ ) -> bool:
282
+ """
283
+ Execute the FIAT model.
284
+
285
+ Parameters
286
+ ----------
287
+ path : Optional[os.PathLike], optional
288
+ The path to the model directory. If not provided, defaults to `self.model_root`.
289
+ exe_path : Optional[os.PathLike], optional
290
+ The path to the FIAT executable. If not provided, defaults to `self.exe_path`.
291
+ delete_crashed_runs : Optional[bool], optional
292
+ Whether to delete files from crashed runs. If not provided, defaults to `self.delete_crashed_runs`.
293
+ strict : bool, optional
294
+ Whether to raise an error if the FIAT model fails to run. Defaults to True.
295
+
296
+ Returns
297
+ -------
298
+ bool
299
+ True if the FIAT model run successfully, False otherwise.
300
+
301
+ Raises
302
+ ------
303
+ ValueError
304
+ If `exe_path` is not provided and `self.exe_path` is None.
305
+ RuntimeError
306
+ If the FIAT model fails to run and `strict` is True.
307
+ """
308
+ if path is None:
309
+ path = self.model_root
310
+ if exe_path is None:
311
+ if self.exe_path is None:
312
+ raise ValueError(
313
+ "'exe_path' needs to be provided either when calling FiatAdapter.execute() or during initialization of the FiatAdapter object."
314
+ )
315
+ exe_path = self.exe_path
316
+ if delete_crashed_runs is None:
317
+ delete_crashed_runs = self.delete_crashed_runs
318
+ path = Path(path)
319
+ fiat_log = path / "fiat.log"
320
+ with cd(path):
321
+ with FloodAdaptLogging.to_file(file_path=fiat_log):
322
+ self.logger.info(f"Running FIAT in {path}")
323
+ process = subprocess.run(
324
+ f'"{exe_path.as_posix()}" run settings.toml',
325
+ stdout=subprocess.PIPE,
326
+ stderr=subprocess.PIPE,
327
+ text=True,
328
+ )
329
+ self.logger.debug(process.stdout)
330
+
331
+ if process.returncode != 0:
332
+ if delete_crashed_runs:
333
+ # Remove all files in the simulation folder except for the log files
334
+ for subdir, dirs, files in os.walk(path, topdown=False):
335
+ for file in files:
336
+ if not file.endswith(".log"):
337
+ os.remove(os.path.join(subdir, file))
338
+
339
+ if not os.listdir(subdir):
340
+ os.rmdir(subdir)
341
+
342
+ if strict:
343
+ raise RuntimeError(f"FIAT model failed to run in {path}.")
344
+ else:
345
+ self.logger.error(f"FIAT model failed to run in {path}.")
346
+
347
+ if process.returncode == 0:
348
+ self.read_outputs()
349
+
350
+ return process.returncode == 0
351
+
352
+ def read_outputs(self) -> None:
353
+ """
354
+ Read the output FIAT CSV file specified in the model configuration and stores the data in the `outputs` attribute.
355
+
356
+ Attributes
357
+ ----------
358
+ outputs : dict
359
+ A dictionary containing the following keys:
360
+ - "path" : Path
361
+ The path to the output directory.
362
+ - "table" : DataFrame
363
+ The contents of the output CSV file.
364
+ """
365
+ # Get output path
366
+ outputs_path = self.model_root.joinpath(self._model.config["output"]["path"])
367
+
368
+ # Get all csvs and concatenate them in a single table
369
+ csv_outputs_df = []
370
+ for output_csv in self._model.config["output"]["csv"]:
371
+ csv_path = outputs_path.joinpath(
372
+ self._model.config["output"]["csv"][output_csv]
373
+ )
374
+ output_csv_df = pd.read_csv(csv_path)
375
+ csv_outputs_df.append(output_csv_df)
376
+ output_csv = pd.concat(csv_outputs_df)
377
+
378
+ # Store them
379
+ self.outputs = {}
380
+ self.outputs["path"] = outputs_path
381
+ self.outputs["table"] = output_csv
382
+
383
+ def _get_aggr_ind(self, aggr_label: str):
384
+ """
385
+ Retrieve the index of the aggregation configuration that matches the given label.
386
+
387
+ Parameters
388
+ ----------
389
+ aggr_label : str
390
+ The label of the aggregation to find.
391
+
392
+ Returns
393
+ -------
394
+ int
395
+ The index of the aggregation configuration that matches the given label.
396
+
397
+ Raises
398
+ ------
399
+ IndexError
400
+ If no aggregation with the given label is found.
401
+ """
402
+ ind = [
403
+ i
404
+ for i, aggr in enumerate(self.config.aggregation)
405
+ if aggr.name == aggr_label
406
+ ][0]
407
+
408
+ return ind
409
+
410
+ def postprocess(self, scenario):
411
+ """
412
+ Post-process the results of the Delft-FIAT simulation for a given scenario.
413
+
414
+ Parameters
415
+ ----------
416
+ scenario : Scenario
417
+ The scenario object containing all relevant data and configurations.
418
+
419
+ Raises
420
+ ------
421
+ RuntimeError
422
+ If the Delft-FIAT simulation did not run successfully.
423
+
424
+ Post-processing steps include:
425
+ - Reading the outputs of the Delft-FIAT simulation.
426
+ - Adding exceedance probabilities for risk mode scenarios.
427
+ - Saving detailed impacts per object to a CSV file.
428
+ - Creating infometrics files based on different metric configurations.
429
+ - Generating infographic files if configured.
430
+ - Calculating equity-based damages for risk mode scenarios.
431
+ - Saving aggregated metrics to shapefiles.
432
+ - Merging points data to building footprints.
433
+ - Creating a roads spatial file if configured.
434
+ - Deleting the simulation folder if the site configuration is set to not keep the simulation.
435
+
436
+ Logging
437
+ -------
438
+ Logs the start and completion of the post-processing steps.
439
+ """
440
+ if not self.fiat_completed():
441
+ raise RuntimeError("Delft-FIAT did not run successfully!")
442
+
443
+ self.logger.info("Post-processing Delft-FIAT results")
444
+
445
+ if not self.outputs:
446
+ self.read_outputs()
447
+ mode = self.database.events.get(scenario.event).mode
448
+
449
+ # Define scenario output path
450
+ impacts = Impacts(scenario=scenario)
451
+ scenario_output_path = impacts.results_path
452
+ impacts_output_path = impacts.impacts_path
453
+
454
+ # Create column mapping to update column names
455
+ name_translation = {}
456
+ for col in self.outputs["table"].columns: # iterate through output columns
457
+ for field in list(self.impact_columns.model_fields): # check for each field
458
+ fiat_col = getattr(self.fiat_columns, field)
459
+ if matches_pattern(col, fiat_col):
460
+ impact_col = getattr(self.impact_columns, field)
461
+ new_col = replace_pattern(col, fiat_col, impact_col)
462
+ if (
463
+ ".0Y" in new_col
464
+ ): # TODO for now quick fix to account for float RP years, while metrics have integers
465
+ new_col = new_col.replace(".0Y", "Y")
466
+ name_translation[col] = new_col # save mapping
467
+ self.name_mapping = name_translation
468
+
469
+ # Rename save outputs
470
+ self.outputs["table"] = self.outputs["table"].rename(columns=self.name_mapping)
471
+
472
+ # Save impacts per object
473
+ fiat_results_path = impacts_output_path.joinpath(
474
+ f"Impacts_detailed_{scenario.name}.csv"
475
+ )
476
+ self.outputs["table"].to_csv(fiat_results_path, index=False)
477
+
478
+ # Add exceedance probabilities if needed (only for risk)
479
+ if mode == Mode.risk:
480
+ # Get config path
481
+ # TODO check where this configs should be read from
482
+ config_path = self.database.static_path.joinpath(
483
+ "templates", "infometrics", "metrics_additional_risk_configs.toml"
484
+ )
485
+ with open(config_path, mode="rb") as fp:
486
+ config = tomli.load(fp)["flood_exceedance"]
487
+ self.add_exceedance_probability(
488
+ column=config[
489
+ "column"
490
+ ], # TODO check how to the correct version of column
491
+ threshold=config["threshold"],
492
+ period=config["period"],
493
+ )
494
+
495
+ # Create the infometrics files
496
+ if mode == Mode.risk:
497
+ ext = "_risk"
498
+ else:
499
+ ext = ""
500
+
501
+ # Get options for metric configurations
502
+ metric_types = ["mandatory", "additional"] # these are checked always
503
+
504
+ if self.config.infographics: # if infographics are created
505
+ metric_types += ["infographic"]
506
+
507
+ metric_config_paths = [
508
+ self.database.static_path.joinpath(
509
+ "templates", "infometrics", f"{name}_metrics_config{ext}.toml"
510
+ )
511
+ for name in metric_types
512
+ ]
513
+
514
+ # Specify the metrics output path
515
+ metrics_outputs_path = scenario_output_path.joinpath(
516
+ f"Infometrics_{scenario.name}.csv"
517
+ )
518
+ self.create_infometrics(metric_config_paths, metrics_outputs_path)
519
+
520
+ # Get paths of created aggregated infometrics
521
+ aggr_metrics_paths = list(
522
+ metrics_outputs_path.parent.glob(f"{metrics_outputs_path.stem}_*.csv")
523
+ )
524
+
525
+ # Create the infographic files
526
+ if self.config.infographics:
527
+ config_base_path = self.database.static_path.joinpath(
528
+ "templates", "Infographics"
529
+ )
530
+ self.create_infographics(
531
+ name=scenario.name,
532
+ output_base_path=scenario_output_path,
533
+ config_base_path=config_base_path,
534
+ metrics_path=metrics_outputs_path,
535
+ mode=mode,
536
+ )
537
+
538
+ # Calculate equity based damages
539
+ if mode == Mode.risk:
540
+ for file in aggr_metrics_paths:
541
+ # Load metrics
542
+ aggr_label = file.stem.split(f"{metrics_outputs_path.stem}_")[-1]
543
+ self.add_equity(aggr_label=aggr_label, metrics_path=file)
544
+
545
+ # Save aggregated metrics to shapefiles
546
+ for file in aggr_metrics_paths:
547
+ aggr_label = file.stem.split(f"{metrics_outputs_path.stem}_")[-1]
548
+ output_path = impacts_output_path.joinpath(
549
+ f"Impacts_aggregated_{scenario.name}_{aggr_label}.gpkg"
550
+ )
551
+ self.save_aggregation_spatial(
552
+ aggr_label=aggr_label, metrics_path=file, output_path=output_path
553
+ )
554
+
555
+ # Merge points data to building footprints
556
+ self.save_building_footprints(
557
+ output_path=impacts_output_path.joinpath(
558
+ f"Impacts_building_footprints_{scenario.name}.gpkg"
559
+ )
560
+ )
561
+
562
+ # Create a roads spatial file
563
+ if self.config.roads_file_name:
564
+ self.save_roads(
565
+ output_path=impacts_output_path.joinpath(
566
+ f"Impacts_roads_{scenario.name}.gpkg"
567
+ )
568
+ )
569
+
570
+ self.logger.info("Delft-FIAT post-processing complete!")
571
+
572
+ # If site config is set to not keep FIAT simulation, delete folder
573
+ if not self.config.save_simulation:
574
+ self.delete_model()
575
+
576
+ def add_measure(self, measure: Measure):
577
+ """
578
+ Add and apply a specific impact measure to the properties of the FIAT model.
579
+
580
+ Parameters
581
+ ----------
582
+ measure : Measure
583
+ The impact measure to be applied. It can be of type Elevate, FloodProof, or Buyout.
584
+
585
+ Notes
586
+ -----
587
+ The method logs the application of the measure and calls the appropriate method based on the measure type:
588
+ - Elevate: Calls elevate_properties(measure)
589
+ - FloodProof: Calls floodproof_properties(measure)
590
+ - Buyout: Calls buyout_properties(measure)
591
+
592
+ If the measure type is unsupported, a warning is logged.
593
+ """
594
+ self.logger.info(f"Applying impact measure '{measure.name}'")
595
+ if isinstance(measure, Elevate):
596
+ self.elevate_properties(measure)
597
+ elif isinstance(measure, FloodProof):
598
+ self.floodproof_properties(measure)
599
+ elif isinstance(measure, Buyout):
600
+ self.buyout_properties(measure)
601
+ else:
602
+ self.logger.warning(
603
+ f"Skipping unsupported measure type {measure.__class__.__name__}"
604
+ )
605
+
606
    def add_projection(self, projection: Projection):
        """
        Add the socioeconomic changes part of a projection to the FIAT model.

        Parameters
        ----------
        projection : Projection
            The projection object containing socioeconomic changes to be applied.

        Notes
        -----
        - Economic growth is applied to all existing buildings if specified.
        - New population growth areas are added if specified, taking into account
          economic growth.
        - Population growth is applied to existing objects if specified.
        - The order of these three steps is significant and must not be changed:
          new development areas are added after economic growth (so they include
          it) but before existing-object population growth (which must not be
          applied to them).
        """
        self.logger.info(
            f"Applying socioeconomic changes from projection '{projection.name}'"
        )
        socio_economic_change = projection.socio_economic_change

        ids_all_buildings = self.get_all_building_ids()

        # Implement socioeconomic changes if needed
        # First apply economic growth to existing objects
        # (math.isclose guards against float noise around an effective 0% value)
        if not math.isclose(socio_economic_change.economic_growth, 0, abs_tol=1e-6):
            self.apply_economic_growth(
                economic_growth=socio_economic_change.economic_growth,
                ids=ids_all_buildings,  #
            )

        # Then the new population growth area is added if provided
        # In the new areas, the economic growth is taken into account!
        # Order matters since for the pop growth new, we only want the economic growth!
        if not math.isclose(
            socio_economic_change.population_growth_new, 0, abs_tol=1e-6
        ):
            # Get path of new development area geometry
            area_path = resolve_filepath(
                object_dir=ObjectDir.projection,
                obj_name=projection.name,
                path=socio_economic_change.new_development_shapefile,
            )

            # Get DEM location for assigning elevation to new areas
            dem = (
                self.database.static_path
                / "dem"
                / self.database.site.sfincs.dem.filename
            )
            # Call adapter method to add the new areas
            self.apply_population_growth_new(
                population_growth=socio_economic_change.population_growth_new,
                ground_floor_height=socio_economic_change.new_development_elevation.value,
                elevation_type=socio_economic_change.new_development_elevation.type,
                area_path=area_path,
                ground_elevation=dem,
            )

        # Then apply population growth to existing objects
        if not math.isclose(
            socio_economic_change.population_growth_existing, 0, abs_tol=1e-6
        ):
            self.apply_population_growth_existing(
                population_growth=socio_economic_change.population_growth_existing,
                ids=ids_all_buildings,
            )
673
+
674
+ def set_hazard(
675
+ self,
676
+ map_fn: Union[os.PathLike, list[os.PathLike]],
677
+ map_type: FloodmapType,
678
+ var: str,
679
+ is_risk: bool = False,
680
+ units: str = us.UnitTypesLength.meters,
681
+ ) -> None:
682
+ """
683
+ Set the hazard map and type for the FIAT model.
684
+
685
+ Parameters
686
+ ----------
687
+ map_fn : str
688
+ The filename of the hazard map.
689
+ map_type : FloodmapType
690
+ The type of the flood map.
691
+ var : str
692
+ The variable name in the hazard map.
693
+ is_risk : bool, optional
694
+ Flag indicating if the map is a risk output. Defaults to False.
695
+ units : str, optional
696
+ The units of the hazard map. Defaults to us.UnitTypesLength.meters.
697
+ """
698
+ self.logger.info(f"Setting hazard to the {map_type} map {map_fn}")
699
+ # Add the floodmap data to a data catalog with the unit conversion
700
+ wl_current_units = us.UnitfulLength(value=1.0, units=units)
701
+ conversion_factor = wl_current_units.convert(self._model.exposure.unit)
702
+
703
+ self._model.setup_hazard(
704
+ map_fn=map_fn,
705
+ map_type=map_type,
706
+ rp=None,
707
+ crs=None, # change this in new version (maybe to str(floodmap.crs.split(':')[1]))
708
+ nodata=-999, # change this in new version
709
+ var=var,
710
+ chunks="auto",
711
+ risk_output=is_risk,
712
+ unit_conversion_factor=conversion_factor,
713
+ )
714
+
715
+ # PROJECTIONS
716
+
717
+ def apply_economic_growth(
718
+ self, economic_growth: float, ids: Optional[list] = None
719
+ ) -> None:
720
+ """
721
+ Apply economic growth to the FIAT-Model by adjusting the maximum potential damage values in the exposure database.
722
+
723
+ This method updates the max potential damage values in the exposure database by
724
+ applying a given economic growth rate. It can optionally filter the updates to
725
+ specific objects identified by their IDs.
726
+
727
+ Parameters
728
+ ----------
729
+ economic_growth : float
730
+ The economic growth rate to apply, expressed as a percentage.
731
+ ids : Optional[list], default=None
732
+ A list of object IDs to which the economic growth should be applied. If None, the growth is applied to all buildings.
733
+ """
734
+ self.logger.info(f"Applying economic growth of {economic_growth} %.")
735
+ # Get columns that include max damage
736
+ damage_cols = [
737
+ c
738
+ for c in self._model.exposure.exposure_db.columns
739
+ if matches_pattern(c, self.fiat_columns.max_potential_damage)
740
+ ]
741
+
742
+ # Get objects that are buildings (using site info)
743
+ buildings_rows = ~self._model.exposure.exposure_db[
744
+ self.fiat_columns.primary_object_type
745
+ ].isin(self.config.non_building_names)
746
+
747
+ # If ids are given use that as an additional filter
748
+ if ids:
749
+ buildings_rows = buildings_rows & self._model.exposure.exposure_db[
750
+ self.fiat_columns.object_id
751
+ ].isin(ids)
752
+
753
+ # Update columns using economic growth value
754
+ updated_max_pot_damage = self._model.exposure.exposure_db.copy()
755
+ updated_max_pot_damage.loc[buildings_rows, damage_cols] *= (
756
+ 1.0 + economic_growth / 100.0
757
+ )
758
+
759
+ # update fiat model
760
+ self._model.exposure.update_max_potential_damage(
761
+ updated_max_potential_damages=updated_max_pot_damage
762
+ )
763
+
764
+ def apply_population_growth_existing(
765
+ self, population_growth: float, ids: Optional[list[str]] = None
766
+ ) -> None:
767
+ """
768
+ Apply population growth to the FIAT-Model by adjusting the existing max potential damage values for buildings.
769
+
770
+ This method updates the max potential damage values in the exposure database by
771
+ applying a given population growth rate. It can optionally filter the updates to
772
+ specific objects identified by their IDs.
773
+
774
+ Parameters
775
+ ----------
776
+ population_growth : float
777
+ The population growth rate as a percentage.
778
+ ids : Optional[list[str]]
779
+ A list of object IDs to filter the updates. If None, the updates are applied to all buildings.
780
+ """
781
+ self.logger.info(f"Applying population growth of {population_growth} %.")
782
+ # Get columns that include max damage
783
+ damage_cols = [
784
+ c
785
+ for c in self._model.exposure.exposure_db.columns
786
+ if matches_pattern(c, self.fiat_columns.max_potential_damage)
787
+ ]
788
+
789
+ # Get objects that are buildings (using site info)
790
+ buildings_rows = ~self._model.exposure.exposure_db[
791
+ self.fiat_columns.primary_object_type
792
+ ].isin(self.config.non_building_names)
793
+
794
+ # If ids are given use that as an additional filter
795
+ if ids:
796
+ buildings_rows = buildings_rows & self._model.exposure.exposure_db[
797
+ self.fiat_columns.object_id
798
+ ].isin(ids)
799
+
800
+ # Update columns using economic growth value
801
+ updated_max_pot_damage = self._model.exposure.exposure_db.copy()
802
+ updated_max_pot_damage.loc[buildings_rows, damage_cols] *= (
803
+ 1.0 + population_growth / 100.0
804
+ )
805
+
806
+ # update fiat model
807
+ self._model.exposure.update_max_potential_damage(
808
+ updated_max_potential_damages=updated_max_pot_damage
809
+ )
810
+
811
    def apply_population_growth_new(
        self,
        population_growth: float,
        ground_floor_height: float,
        elevation_type: str,
        area_path: str,
        ground_elevation: Union[None, str, Path] = None,
    ) -> None:
        """
        Apply population growth in a new area by adding new objects in the model.

        Parameters
        ----------
        population_growth : float
            The percentage of population growth to apply.
        ground_floor_height : float
            The height of the ground floor, in the exposure's length unit.
        elevation_type : str
            The type of elevation reference to use. Must be either 'floodmap' or 'datum'.
        area_path : str
            The path to the area file with the new development geometries.
        ground_elevation : Union[None, str, Path], optional
            The ground elevation reference (e.g. a DEM path). Default is None.

        Raises
        ------
        ValueError
            If `elevation_type` is 'floodmap' and base flood elevation (bfe) map is not provided.
            If `elevation_type` is not 'floodmap' or 'datum'.
        """
        self.logger.info(
            f"Applying population growth of {population_growth} %, by creating a new development area using the geometries from {area_path} and a ground floor height of {ground_floor_height} {self._model.exposure.unit} above '{elevation_type}'."
        )
        # Translate the FloodAdapt elevation reference into the keyword
        # arguments expected by hydromt-fiat.
        if elevation_type == "floodmap":
            # A 'floodmap' reference needs the configured bfe geometry to
            # measure heights against.
            if not self.config.bfe:
                raise ValueError(
                    "Base flood elevation (bfe) map is required to use 'floodmap' as reference."
                )
            kwargs = {
                "elevation_reference": "geom",
                "path_ref": self.database.static_path.joinpath(self.config.bfe.geom),
                "attr_ref": self.config.bfe.field_name,
            }
        elif elevation_type == "datum":
            kwargs = {"elevation_reference": "datum"}
        else:
            raise ValueError("elevation type can only be one of 'floodmap' or 'datum'")
        # Collect the aggregation-area files, their id attributes and the
        # exposure column labels the new objects should be tagged with.
        aggregation_areas = [
            self.database.static_path.joinpath(aggr.file)
            for aggr in self.config.aggregation
        ]
        attribute_names = [aggr.field_name for aggr in self.config.aggregation]
        label_names = [
            self.fiat_columns.aggregation_label.format(name=aggr.name)
            for aggr in self.config.aggregation
        ]
        # Geometry layer name for the new development objects (file stem).
        new_dev_geom_name = Path(self.config.new_development_file_name).stem
        # Delegate creation of the composite new-development objects to
        # hydromt-fiat.
        self._model.exposure.setup_new_composite_areas(
            percent_growth=population_growth,
            geom_file=Path(area_path),
            ground_floor_height=ground_floor_height,
            damage_types=self.damage_types,
            vulnerability=self._model.vulnerability,
            ground_elevation=ground_elevation,
            aggregation_area_fn=aggregation_areas,
            attribute_names=attribute_names,
            label_names=label_names,
            geom_name=new_dev_geom_name,
            **kwargs,
        )
+ # MEASURES
886
+ @staticmethod
887
+ def _get_area_name(measure: Measure):
888
+ """
889
+ Determine the area name based on the selection type of the measure.
890
+
891
+ Parameters
892
+ ----------
893
+ measure : Measure
894
+ An instance of Measure containing attributes that define the selection type and area.
895
+
896
+ Returns
897
+ -------
898
+ str
899
+ The name of the area. It returns the aggregation area name if the selection type is "aggregation_area",
900
+ the polygon file name if the selection type is "polygon", and "all" for any other selection type.
901
+ """
902
+ if measure.selection_type == "aggregation_area":
903
+ area = measure.aggregation_area_name
904
+ elif measure.selection_type == "polygon":
905
+ area = measure.polygon_file
906
+ else:
907
+ area = "all"
908
+ return area
909
+
910
+ def elevate_properties(self, elevate: Elevate) -> None:
911
+ """
912
+ Elevate the ground floor height of properties based on the provided Elevate measure.
913
+
914
+ Parameters
915
+ ----------
916
+ elevate : Elevate
917
+ The Elevate measure containing the elevation details.
918
+
919
+ Raises
920
+ ------
921
+ ValueError
922
+ If the elevation type is 'floodmap' and the base flood elevation (bfe) map is not provided.
923
+ If the elevation type is not 'floodmap' or 'datum'.
924
+ """
925
+ area = self._get_area_name(elevate)
926
+ self.logger.info(
927
+ f"Elevating '{elevate.property_type}' type properties in '{area}' by {elevate.elevation} relative to '{elevate.elevation.type}'."
928
+ )
929
+ # If ids are given use that as an additional filter
930
+ objectids = self.get_object_ids(elevate)
931
+
932
+ # Get reference type to align with hydromt
933
+ if elevate.elevation.type == "floodmap":
934
+ if not self.config.bfe:
935
+ raise ValueError(
936
+ "Base flood elevation (bfe) map is required to use 'floodmap' as reference."
937
+ )
938
+ if self.config.bfe.table:
939
+ path_ref = self.config_base_path.joinpath(self.config.bfe.table)
940
+ height_reference = "table"
941
+ else:
942
+ path_ref = self.config_base_path.joinpath(self.config.bfe.geom)
943
+ height_reference = "geom"
944
+ # Use hydromt function
945
+ self._model.exposure.raise_ground_floor_height(
946
+ raise_by=elevate.elevation.value,
947
+ objectids=objectids,
948
+ height_reference=height_reference,
949
+ path_ref=path_ref,
950
+ attr_ref=self.config.bfe.field_name,
951
+ )
952
+
953
+ elif elevate.elevation.type == "datum":
954
+ # Use hydromt function
955
+ self._model.exposure.raise_ground_floor_height(
956
+ raise_by=elevate.elevation.value,
957
+ objectids=objectids,
958
+ height_reference="datum",
959
+ )
960
+ else:
961
+ raise ValueError("elevation type can only be one of 'floodmap' or 'datum'")
962
+
963
+ def buyout_properties(self, buyout: Buyout) -> None:
964
+ """
965
+ Apply the buyout measure to the properties by setting their maximum potential damage to zero.
966
+
967
+ Parameters
968
+ ----------
969
+ buyout : Buyout
970
+ The Buyout measure containing the details of the properties to be bought out.
971
+
972
+ """
973
+ area = self._get_area_name(buyout)
974
+ self.logger.info(
975
+ f"Buying-out '{buyout.property_type}' type properties in '{area}'."
976
+ )
977
+ # Get columns that include max damage
978
+ damage_cols = [
979
+ c
980
+ for c in self._model.exposure.exposure_db.columns
981
+ if matches_pattern(c, self.fiat_columns.max_potential_damage)
982
+ ]
983
+
984
+ # Get objects that are buildings (using site info)
985
+ buildings_rows = ~self._model.exposure.exposure_db[
986
+ self.fiat_columns.primary_object_type
987
+ ].isin(self.config.non_building_names)
988
+
989
+ # Get rows that are affected
990
+ objectids = self.get_object_ids(buyout)
991
+ rows = (
992
+ self._model.exposure.exposure_db[self.fiat_columns.object_id].isin(
993
+ objectids
994
+ )
995
+ & buildings_rows
996
+ )
997
+
998
+ # Update columns
999
+ updated_max_pot_damage = self._model.exposure.exposure_db.copy()
1000
+ updated_max_pot_damage.loc[rows, damage_cols] *= 0
1001
+
1002
+ # update fiat model
1003
+ self._model.exposure.update_max_potential_damage(
1004
+ updated_max_potential_damages=updated_max_pot_damage
1005
+ )
1006
+
1007
+ def floodproof_properties(self, floodproof: FloodProof) -> None:
1008
+ """
1009
+ Apply floodproofing measures to the properties by truncating the damage function.
1010
+
1011
+ Parameters
1012
+ ----------
1013
+ floodproof : FloodProof
1014
+ The FloodProof measure containing the details of the properties to be floodproofed.
1015
+ """
1016
+ area = self._get_area_name(floodproof)
1017
+ self.logger.info(
1018
+ f"Flood-proofing '{floodproof.property_type}' type properties in '{area}' by {floodproof.elevation}."
1019
+ )
1020
+ # If ids are given use that as an additional filter
1021
+ objectids = self.get_object_ids(floodproof)
1022
+
1023
+ # Use hydromt function
1024
+ self._model.exposure.truncate_damage_function(
1025
+ objectids=objectids,
1026
+ floodproof_to=floodproof.elevation.value,
1027
+ damage_function_types=self.damage_types,
1028
+ vulnerability=self._model.vulnerability,
1029
+ )
1030
+
1031
+ # STATIC METHODS
1032
+
1033
+ def get_buildings(self) -> gpd.GeoDataFrame:
1034
+ """
1035
+ Retrieve the building geometries from the FIAT model's exposure database.
1036
+
1037
+ Returns
1038
+ -------
1039
+ gpd.GeoDataFrame
1040
+ A GeoDataFrame containing the geometries of all buildings in the FIAT model.
1041
+
1042
+ Raises
1043
+ ------
1044
+ ValueError
1045
+ If the FIAT model does not have an exposure database initialized.
1046
+ """
1047
+ if self._model.exposure is None:
1048
+ raise ValueError(
1049
+ "FIAT model does not have exposure, make sure your model has been initialized."
1050
+ )
1051
+ gdf_0 = self._model.exposure.select_objects(
1052
+ primary_object_type="ALL",
1053
+ non_building_names=self.config.non_building_names,
1054
+ return_gdf=True,
1055
+ )
1056
+ # Rename columns
1057
+ name_translation = {}
1058
+ for col in gdf_0.columns: # iterate through output columns
1059
+ for field in list(self.impact_columns.model_fields): # check for each field
1060
+ fiat_col = getattr(self.fiat_columns, field)
1061
+ if matches_pattern(col, fiat_col):
1062
+ impact_col = getattr(self.impact_columns, field)
1063
+ new_col = replace_pattern(col, fiat_col, impact_col)
1064
+ name_translation[col] = new_col # save mapping
1065
+ gdf = gdf_0.rename(columns=name_translation)
1066
+ return gdf
1067
+
1068
+ def get_property_types(self) -> list:
1069
+ """
1070
+ Retrieve the list of property types from the FIAT model's exposure database.
1071
+
1072
+ Returns
1073
+ -------
1074
+ list
1075
+ A list of property types available in the FIAT model.
1076
+
1077
+ Raises
1078
+ ------
1079
+ ValueError
1080
+ If no property types are found in the FIAT model.
1081
+ """
1082
+ types = self._model.exposure.get_primary_object_type()
1083
+ if types is None:
1084
+ raise ValueError("No property types found in the FIAT model.")
1085
+ types.append("all") # Add "all" type for using as identifier
1086
+
1087
+ names = self.config.non_building_names
1088
+ if names:
1089
+ for name in names:
1090
+ if name in types:
1091
+ types.remove(name)
1092
+
1093
+ return types
1094
+
1095
+ def get_all_building_ids(self):
1096
+ """
1097
+ Retrieve the IDs of all buildings in the FIAT model.
1098
+
1099
+ Returns
1100
+ -------
1101
+ list
1102
+ A list of IDs for all buildings in the FIAT model.
1103
+ """
1104
+ # Get ids of existing buildings
1105
+ ids = self._model.exposure.get_object_ids(
1106
+ "all", non_building_names=self.config.non_building_names
1107
+ )
1108
+ return ids
1109
+
1110
+ def get_object_ids(self, measure: Measure) -> list[Any]:
1111
+ """
1112
+ Retrieve the object IDs for a given impact measure.
1113
+
1114
+ Parameters
1115
+ ----------
1116
+ measure : Measure
1117
+ The impact measure for which to retrieve object IDs.
1118
+
1119
+ Returns
1120
+ -------
1121
+ list[Any]
1122
+ A list of object IDs that match the criteria of the given measure.
1123
+
1124
+ Raises
1125
+ ------
1126
+ ValueError
1127
+ If the measure type is not an impact measure.
1128
+ """
1129
+ if not MeasureType.is_impact(measure.type):
1130
+ raise ValueError(
1131
+ f"Measure type {measure.type} is not an impact measure. "
1132
+ "Can only retrieve object ids for impact measures."
1133
+ )
1134
+
1135
+ # check if polygon file is used, then get the absolute path
1136
+ if measure.polygon_file:
1137
+ polygon_file = resolve_filepath(
1138
+ object_dir=ObjectDir.measure,
1139
+ obj_name=measure.name,
1140
+ path=measure.polygon_file,
1141
+ )
1142
+ else:
1143
+ polygon_file = None
1144
+
1145
+ # use the hydromt-fiat method to the ids
1146
+ ids = self._model.exposure.get_object_ids(
1147
+ selection_type=measure.selection_type,
1148
+ property_type=measure.property_type,
1149
+ non_building_names=self.config.non_building_names,
1150
+ aggregation=measure.aggregation_area_type,
1151
+ aggregation_area_name=measure.aggregation_area_name,
1152
+ polygon_file=str(polygon_file),
1153
+ )
1154
+
1155
+ return ids
1156
+
1157
+ # POST-PROCESSING METHODS
1158
+
1159
+ def add_exceedance_probability(
1160
+ self, column: str, threshold: float, period: int
1161
+ ) -> pd.DataFrame:
1162
+ """Calculate exceedance probabilities and append them to the results table.
1163
+
1164
+ Parameters
1165
+ ----------
1166
+ column : str
1167
+ The name of the column to calculate exceedance probabilities for.
1168
+ threshold : float
1169
+ The threshold value for exceedance probability calculation.
1170
+ period : int
1171
+ The return period for exceedance probability calculation.
1172
+
1173
+ Returns
1174
+ -------
1175
+ pd.DataFrame
1176
+ The updated results table with exceedance probabilities appended.
1177
+ """
1178
+ self.logger.info("Calculating exceedance probabilities")
1179
+ fiat_results_df = ExceedanceProbabilityCalculator(column).append_probability(
1180
+ self.outputs["table"], threshold, period
1181
+ )
1182
+ self.outputs["table"] = fiat_results_df
1183
+ return self.outputs["table"]
1184
+
1185
+ def create_infometrics(
1186
+ self, metric_config_paths: list[os.PathLike], metrics_output_path: os.PathLike
1187
+ ) -> None:
1188
+ """
1189
+ Create infometrics files based on the provided metric configuration paths.
1190
+
1191
+ Parameters
1192
+ ----------
1193
+ metric_config_paths : list[os.PathLike]
1194
+ A list of paths to the metric configuration files.
1195
+ metrics_output_path : os.PathLike
1196
+ The path where the metrics output file will be saved.
1197
+
1198
+ Raises
1199
+ ------
1200
+ FileNotFoundError
1201
+ If a mandatory metric configuration file does not exist.
1202
+ """
1203
+ # Get the metrics configuration
1204
+ self.logger.info("Calculating infometrics")
1205
+
1206
+ # Write the metrics to file
1207
+ # Check if type of metric configuration is available
1208
+ for metric_file in metric_config_paths:
1209
+ if metric_file.exists():
1210
+ metrics_writer = MetricsFileWriter(
1211
+ metric_file,
1212
+ aggregation_label_fmt=self.impact_columns.aggregation_label,
1213
+ )
1214
+
1215
+ metrics_writer.parse_metrics_to_file(
1216
+ df_results=self.outputs["table"],
1217
+ metrics_path=metrics_output_path,
1218
+ write_aggregate=None,
1219
+ )
1220
+
1221
+ metrics_writer.parse_metrics_to_file(
1222
+ df_results=self.outputs["table"],
1223
+ metrics_path=metrics_output_path,
1224
+ write_aggregate="all",
1225
+ )
1226
+ else:
1227
+ if "mandatory" in metric_file.name.lower():
1228
+ raise FileNotFoundError(
1229
+ f"Mandatory metric configuration file {metric_file} does not exist!"
1230
+ )
1231
+
1232
+ def create_infographics(
1233
+ self,
1234
+ name: str,
1235
+ output_base_path: os.PathLike,
1236
+ config_base_path: os.PathLike,
1237
+ metrics_path: os.PathLike,
1238
+ mode: Mode = Mode.single_event,
1239
+ ):
1240
+ """Create infographic files based on the provided metrics and configuration.
1241
+
1242
+ Parameters
1243
+ ----------
1244
+ name : str
1245
+ The name of the scenario.
1246
+ output_base_path : os.PathLike
1247
+ The base path where the output files will be saved.
1248
+ config_base_path : os.PathLike
1249
+ The base path where the configuration files are located.
1250
+ metrics_path : os.PathLike
1251
+ The path to the metrics file.
1252
+ mode : Mode, optional
1253
+ The mode of the infographic, by default Mode.single_event.
1254
+ """
1255
+ self.logger.info("Creating infographics")
1256
+
1257
+ # Check if infographics config file exists
1258
+ if mode == Mode.risk:
1259
+ config_path = config_base_path.joinpath("config_risk_charts.toml")
1260
+ if not config_path.exists():
1261
+ self.logger.warning(
1262
+ "Risk infographic cannot be created, since 'config_risk_charts.toml' is not available"
1263
+ )
1264
+ return
1265
+
1266
+ # Get the infographic
1267
+ InforgraphicFactory.create_infographic_file_writer(
1268
+ infographic_mode=mode,
1269
+ scenario_name=name,
1270
+ metrics_full_path=metrics_path,
1271
+ config_base_path=config_base_path,
1272
+ output_base_path=output_base_path,
1273
+ ).write_infographics_to_file()
1274
+
1275
    def add_equity(
        self,
        aggr_label: str,
        metrics_path: os.PathLike,
        damage_column_pattern: str = "TotalDamageRP{rp}",
        gamma: float = 1.2,
    ):
        """Calculate equity-based damages for a given aggregation label.

        The equity results (EW, EWEAD, EWCEAD columns) are merged into the
        metrics file at `metrics_path`, which is overwritten in place.

        Parameters
        ----------
        aggr_label : str
            The label of the aggregation area.
        metrics_path : os.PathLike
            The path to the metrics file.
        damage_column_pattern : str, optional
            The pattern for the damage column names, by default "TotalDamageRP{rp}".
        gamma : float, optional
            The equity weight parameter, by default 1.2
        """
        # TODO gamma in configuration file?

        ind = self._get_aggr_ind(aggr_label)
        # TODO check what happens if aggr_label not in config

        # Equity can only be computed when census inputs are configured for
        # this aggregation area; otherwise warn and return without changes.
        if self.config.aggregation[ind].equity is None:
            self.logger.warning(
                f"Cannot calculate equity weighted risk for aggregation label: {aggr_label}, because equity inputs are not available."
            )
            return

        self.logger.info(
            f"Calculating equity weighted risk for aggregation label: {aggr_label} "
        )
        metrics = pd.read_csv(metrics_path)
        # Create Equity object from the configured census data and the
        # aggregated damages read above.
        equity = Equity(
            census_table=self.config_base_path.joinpath(
                self.config.aggregation[ind].equity.census_data
            ),
            damages_table=metrics,
            aggregation_label=self.config.aggregation[ind].field_name,
            percapitaincome_label=self.config.aggregation[
                ind
            ].equity.percapitaincome_label,
            totalpopulation_label=self.config.aggregation[
                ind
            ].equity.totalpopulation_label,
            damage_column_pattern=damage_column_pattern,
        )
        # Calculate equity
        df_equity = equity.equity_calculation(gamma)
        # Merge the equity results into the metrics table, matching the
        # metrics' first column against the aggregation field name.
        metrics_new = metrics.merge(
            df_equity,
            left_on=metrics.columns[0],
            right_on=self.config.aggregation[ind].field_name,
            how="left",
        )
        # Drop the duplicated join key and restore the original index column.
        del metrics_new[self.config.aggregation[ind].field_name]
        metrics_new = metrics_new.set_index(metrics_new.columns[0])
        # Fill the metadata rows (Description / Show In Metrics Table /
        # Long Name) for the three new equity columns so they render
        # correctly in the metrics table.
        metrics_new.loc["Description", ["EW", "EWEAD", "EWCEAD"]] = [
            "Equity weight",
            "Equity weighted expected annual damage",
            "Equity weighted certainty equivalent annual damage",
        ]
        metrics_new.loc["Show In Metrics Table", ["EW", "EWEAD", "EWCEAD"]] = [
            True,
            True,
            True,
        ]
        metrics_new.loc["Long Name", ["EW", "EWEAD", "EWCEAD"]] = [
            "Equity weight",
            "Equity weighted expected annual damage",
            "Equity weighted certainty equivalent annual damage",
        ]
        # Unnamed index matches the original metrics file layout on resave.
        metrics_new.index.name = None
        metrics_new.to_csv(metrics_path)
+ def save_aggregation_spatial(
1355
+ self, aggr_label: str, metrics_path: os.PathLike, output_path: os.PathLike
1356
+ ):
1357
+ """
1358
+ Save aggregated metrics to a spatial file.
1359
+
1360
+ Parameters
1361
+ ----------
1362
+ aggr_label : str
1363
+ The label of the aggregation area.
1364
+ metrics_path : os.PathLike
1365
+ The path to the metrics file.
1366
+ output_path : os.PathLike
1367
+ The path where the output spatial file will be saved.
1368
+ """
1369
+ self.logger.info(f"Saving impacts for aggregation areas type: '{aggr_label}'")
1370
+
1371
+ metrics = pd.read_csv(metrics_path)
1372
+
1373
+ # Load aggregation areas
1374
+ ind = self._get_aggr_ind(aggr_label)
1375
+
1376
+ aggr_areas_path = self.config_base_path.joinpath(
1377
+ self.config.aggregation[ind].file
1378
+ )
1379
+
1380
+ aggr_areas = gpd.read_file(aggr_areas_path, engine="pyogrio")
1381
+
1382
+ # Save file
1383
+ AggregationAreas.write_spatial_file(
1384
+ metrics,
1385
+ aggr_areas,
1386
+ output_path,
1387
+ id_name=self.config.aggregation[ind].field_name,
1388
+ file_format="geopackage",
1389
+ )
1390
+
1391
    def save_building_footprints(self, output_path: os.PathLike):
        """
        Aggregate impacts at a building footprint level and then saves to an output file.

        Three cases are handled: external footprints configured in the site
        config, footprints already present as exposure geometries, or no
        footprints at all (point data is written directly).

        Parameters
        ----------
        output_path : os.PathLike
            The path where the output spatial file will be saved.

        Raises
        ------
        ValueError
            If no building footprints are provided in the configuration.
        """
        self.logger.info("Calculating impacts at a building footprint scale")

        # Read the existing building points
        buildings = self._model.exposure.select_objects(
            primary_object_type="ALL",
            non_building_names=self.config.non_building_names,
            return_gdf=True,
        )

        # Keep only the object id and geometry, renamed to impact columns
        buildings = buildings[[self.fiat_columns.object_id, "geometry"]]
        buildings = buildings.rename(
            columns={self.fiat_columns.object_id: self.impact_columns.object_id}
        )

        # Attach the per-building results to the geometries (inner join on
        # object id, so only buildings present in the results remain).
        fiat_results_df = gpd.GeoDataFrame(
            self.outputs["table"].merge(
                buildings,
                on=self.impact_columns.object_id,
                how="inner",
            )
        )

        # Check which footprint case we have
        # Case 1: FIAT has points and external footprints are provided
        if self.config.building_footprints:
            method = "external_footprints"
            # Get footprints file
            footprints_path = self.config_base_path.joinpath(
                self.config.building_footprints
            )
            # Read building footprints
            footprints_gdf = gpd.read_file(footprints_path, engine="pyogrio")
            # "BF_FID" is the id field expected in external footprint files
            field_name = "BF_FID"
        # Case 2: FIAT exposure geometries are already (multi)polygons
        elif all(buildings.geometry.geom_type.isin(["Polygon", "MultiPolygon"])):
            method = "internal_footprints"
            footprints_gdf = buildings[[self.impact_columns.object_id, "geometry"]]
            field_name = self.impact_columns.object_id
        # Case 3: FIAT has points and no external footprints are available
        else:
            method = "no_footprints"

        # Based on case follow different workflow
        if method in ["external_footprints", "internal_footprints"]:
            footprints = Footprints(
                footprints=footprints_gdf,
                fiat_columns=self.impact_columns,
                field_name=field_name,
            )
            footprints.aggregate(fiat_results_df)
        elif method == "no_footprints":
            # Without footprints the point results are written as-is.
            footprints = Footprints(fiat_columns=self.impact_columns)
            footprints.set_point_data(fiat_results_df)

        # Normalize damages
        footprints.calc_normalized_damages()

        # Save footprint
        footprints.write(output_path)
+ def save_roads(self, output_path: os.PathLike):
1468
+ """
1469
+ Save the impacts on roads to a spatial file.
1470
+
1471
+ Parameters
1472
+ ----------
1473
+ output_path : os.PathLike
1474
+ The path where the output spatial file will be saved.
1475
+ """
1476
+ self.logger.info("Calculating road impacts")
1477
+ # Read roads spatial file
1478
+ roads = gpd.read_file(
1479
+ self.outputs["path"].joinpath(self.config.roads_file_name)
1480
+ )
1481
+ roads = roads.rename(columns=self.name_mapping)
1482
+ # Get columns to use
1483
+ aggr_cols = [
1484
+ name
1485
+ for name in self.outputs["table"].columns
1486
+ if self.impact_columns.aggregation_label in name
1487
+ ]
1488
+ inun_cols = [
1489
+ name
1490
+ for name in roads.columns
1491
+ if self.impact_columns.inundation_depth in name
1492
+ ]
1493
+ # Merge data
1494
+ roads = roads[[self.impact_columns.object_id, "geometry"] + inun_cols].merge(
1495
+ self.outputs["table"][
1496
+ [self.impact_columns.object_id, self.impact_columns.primary_object_type]
1497
+ + aggr_cols
1498
+ ],
1499
+ on=self.impact_columns.object_id,
1500
+ )
1501
+ # Save as geopackage
1502
+ roads.to_file(output_path, driver="GPKG")