flood-adapt 0.3.9__py3-none-any.whl → 0.3.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. flood_adapt/__init__.py +26 -22
  2. flood_adapt/adapter/__init__.py +9 -9
  3. flood_adapt/adapter/fiat_adapter.py +1541 -1541
  4. flood_adapt/adapter/interface/hazard_adapter.py +70 -70
  5. flood_adapt/adapter/interface/impact_adapter.py +36 -36
  6. flood_adapt/adapter/interface/model_adapter.py +89 -89
  7. flood_adapt/adapter/interface/offshore.py +19 -19
  8. flood_adapt/adapter/sfincs_adapter.py +1853 -1848
  9. flood_adapt/adapter/sfincs_offshore.py +187 -193
  10. flood_adapt/config/config.py +248 -248
  11. flood_adapt/config/fiat.py +219 -219
  12. flood_adapt/config/gui.py +331 -331
  13. flood_adapt/config/sfincs.py +481 -336
  14. flood_adapt/config/site.py +129 -129
  15. flood_adapt/database_builder/database_builder.py +2210 -2210
  16. flood_adapt/database_builder/templates/default_units/imperial.toml +9 -9
  17. flood_adapt/database_builder/templates/default_units/metric.toml +9 -9
  18. flood_adapt/database_builder/templates/green_infra_table/green_infra_lookup_table.csv +10 -10
  19. flood_adapt/database_builder/templates/infographics/OSM/config_charts.toml +90 -90
  20. flood_adapt/database_builder/templates/infographics/OSM/config_people.toml +57 -57
  21. flood_adapt/database_builder/templates/infographics/OSM/config_risk_charts.toml +121 -121
  22. flood_adapt/database_builder/templates/infographics/OSM/config_roads.toml +65 -65
  23. flood_adapt/database_builder/templates/infographics/OSM/styles.css +45 -45
  24. flood_adapt/database_builder/templates/infographics/US_NSI/config_charts.toml +126 -126
  25. flood_adapt/database_builder/templates/infographics/US_NSI/config_people.toml +60 -60
  26. flood_adapt/database_builder/templates/infographics/US_NSI/config_risk_charts.toml +121 -121
  27. flood_adapt/database_builder/templates/infographics/US_NSI/config_roads.toml +65 -65
  28. flood_adapt/database_builder/templates/infographics/US_NSI/styles.css +45 -45
  29. flood_adapt/database_builder/templates/infometrics/OSM/metrics_additional_risk_configs.toml +4 -4
  30. flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config.toml +143 -143
  31. flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config_risk.toml +153 -153
  32. flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config.toml +127 -127
  33. flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config_risk.toml +57 -57
  34. flood_adapt/database_builder/templates/infometrics/US_NSI/metrics_additional_risk_configs.toml +4 -4
  35. flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config.toml +191 -191
  36. flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config_risk.toml +153 -153
  37. flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config.toml +178 -178
  38. flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config_risk.toml +57 -57
  39. flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config.toml +9 -9
  40. flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config_risk.toml +65 -65
  41. flood_adapt/database_builder/templates/output_layers/bin_colors.toml +5 -5
  42. flood_adapt/database_builder.py +16 -16
  43. flood_adapt/dbs_classes/__init__.py +21 -21
  44. flood_adapt/dbs_classes/database.py +533 -684
  45. flood_adapt/dbs_classes/dbs_benefit.py +77 -76
  46. flood_adapt/dbs_classes/dbs_event.py +61 -59
  47. flood_adapt/dbs_classes/dbs_measure.py +112 -111
  48. flood_adapt/dbs_classes/dbs_projection.py +34 -34
  49. flood_adapt/dbs_classes/dbs_scenario.py +137 -137
  50. flood_adapt/dbs_classes/dbs_static.py +274 -273
  51. flood_adapt/dbs_classes/dbs_strategy.py +130 -129
  52. flood_adapt/dbs_classes/dbs_template.py +279 -278
  53. flood_adapt/dbs_classes/interface/database.py +107 -139
  54. flood_adapt/dbs_classes/interface/element.py +121 -121
  55. flood_adapt/dbs_classes/interface/static.py +47 -47
  56. flood_adapt/flood_adapt.py +1229 -1178
  57. flood_adapt/misc/database_user.py +16 -16
  58. flood_adapt/misc/exceptions.py +22 -0
  59. flood_adapt/misc/log.py +183 -183
  60. flood_adapt/misc/path_builder.py +54 -54
  61. flood_adapt/misc/utils.py +185 -185
  62. flood_adapt/objects/__init__.py +82 -82
  63. flood_adapt/objects/benefits/benefits.py +61 -61
  64. flood_adapt/objects/events/event_factory.py +135 -135
  65. flood_adapt/objects/events/event_set.py +88 -84
  66. flood_adapt/objects/events/events.py +236 -234
  67. flood_adapt/objects/events/historical.py +58 -58
  68. flood_adapt/objects/events/hurricane.py +68 -67
  69. flood_adapt/objects/events/synthetic.py +46 -50
  70. flood_adapt/objects/forcing/__init__.py +92 -92
  71. flood_adapt/objects/forcing/csv.py +68 -68
  72. flood_adapt/objects/forcing/discharge.py +66 -66
  73. flood_adapt/objects/forcing/forcing.py +150 -150
  74. flood_adapt/objects/forcing/forcing_factory.py +182 -182
  75. flood_adapt/objects/forcing/meteo_handler.py +93 -93
  76. flood_adapt/objects/forcing/netcdf.py +40 -40
  77. flood_adapt/objects/forcing/plotting.py +453 -429
  78. flood_adapt/objects/forcing/rainfall.py +98 -98
  79. flood_adapt/objects/forcing/tide_gauge.py +191 -191
  80. flood_adapt/objects/forcing/time_frame.py +90 -90
  81. flood_adapt/objects/forcing/timeseries.py +564 -564
  82. flood_adapt/objects/forcing/unit_system.py +580 -580
  83. flood_adapt/objects/forcing/waterlevels.py +108 -108
  84. flood_adapt/objects/forcing/wind.py +124 -124
  85. flood_adapt/objects/measures/measure_factory.py +92 -92
  86. flood_adapt/objects/measures/measures.py +551 -529
  87. flood_adapt/objects/object_model.py +74 -68
  88. flood_adapt/objects/projections/projections.py +103 -103
  89. flood_adapt/objects/scenarios/scenarios.py +22 -22
  90. flood_adapt/objects/strategies/strategies.py +89 -89
  91. flood_adapt/workflows/benefit_runner.py +579 -554
  92. flood_adapt/workflows/floodmap.py +85 -85
  93. flood_adapt/workflows/impacts_integrator.py +85 -85
  94. flood_adapt/workflows/scenario_runner.py +70 -70
  95. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/LICENSE +674 -674
  96. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/METADATA +867 -865
  97. flood_adapt-0.3.11.dist-info/RECORD +140 -0
  98. flood_adapt-0.3.9.dist-info/RECORD +0 -139
  99. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/WHEEL +0 -0
  100. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/top_level.txt +0 -0
@@ -1,1541 +1,1541 @@
1
- import logging
2
- import math
3
- import os
4
- import shutil
5
- import subprocess
6
- from pathlib import Path
7
- from typing import Any, Optional, Union
8
-
9
- import geopandas as gpd
10
- import pandas as pd
11
- import tomli
12
- from fiat_toolbox import FiatColumns, get_fiat_columns
13
- from fiat_toolbox.equity.equity import Equity
14
- from fiat_toolbox.infographics.infographics_factory import InforgraphicFactory
15
- from fiat_toolbox.metrics_writer.fiat_write_metrics_file import MetricsFileWriter
16
- from fiat_toolbox.metrics_writer.fiat_write_return_period_threshold import (
17
- ExceedanceProbabilityCalculator,
18
- )
19
- from fiat_toolbox.spatial_output.aggregation_areas import AggregationAreas
20
- from fiat_toolbox.spatial_output.footprints import Footprints
21
- from fiat_toolbox.utils import extract_variables, matches_pattern, replace_pattern
22
- from hydromt_fiat.fiat import FiatModel
23
-
24
- from flood_adapt.adapter.interface.impact_adapter import IImpactAdapter
25
- from flood_adapt.config.fiat import FiatConfigModel
26
- from flood_adapt.misc.log import FloodAdaptLogging
27
- from flood_adapt.misc.path_builder import (
28
- ObjectDir,
29
- )
30
- from flood_adapt.misc.utils import cd, resolve_filepath
31
- from flood_adapt.objects.events.events import Mode
32
- from flood_adapt.objects.forcing import unit_system as us
33
- from flood_adapt.objects.measures.measures import (
34
- Buyout,
35
- Elevate,
36
- FloodProof,
37
- Measure,
38
- MeasureType,
39
- )
40
- from flood_adapt.objects.projections.projections import Projection
41
- from flood_adapt.objects.scenarios.scenarios import Scenario
42
- from flood_adapt.workflows.floodmap import FloodMap, FloodmapType
43
- from flood_adapt.workflows.impacts_integrator import Impacts
44
-
45
- # Define naming structure for saved files
46
- _IMPACT_COLUMNS = FiatColumns(
47
- object_id="Object ID",
48
- object_name="Object Name",
49
- primary_object_type="Primary Object Type",
50
- secondary_object_type="Secondary Object Type",
51
- extraction_method="Extraction Method",
52
- ground_floor_height="Ground Floor Height",
53
- ground_elevation="Ground Elevation",
54
- damage_function="Damage Function: {name}",
55
- max_potential_damage="Max Potential Damage: {name}",
56
- aggregation_label="Aggregation Label: {name}",
57
- inundation_depth="Inundation Depth",
58
- inundation_depth_rp="Inundation Depth ({years}Y)",
59
- reduction_factor="Reduction Factor",
60
- reduction_factor_rp="Reduction Factor ({years}Y)",
61
- damage="Damage: {name}",
62
- damage_rp="Damage: {name} ({years}Y)",
63
- total_damage="Total Damage",
64
- total_damage_rp="Total Damage ({years}Y)",
65
- risk_ead="Risk (EAD)",
66
- segment_length="Segment Length",
67
- )
68
-
69
- # Define column naming of FIAT model
70
- _FIAT_COLUMNS: FiatColumns = get_fiat_columns(
71
- fiat_version="0.2.1"
72
- ) # columns of FIAT # TODO add version from config
73
-
74
-
75
- class FiatAdapter(IImpactAdapter):
76
- """
77
- ImpactAdapter for Delft-FIAT.
78
-
79
- It includes:
80
- - preprocessing methods for adding measures, projections and hazards
81
- - executing method for running a Delft-FIAT simulation
82
- - postprocessing methods for saving impact results
83
- """
84
-
85
- # TODO deal with all the relative paths for the files used
86
- # TODO IImpactAdapter and general Adapter class should NOT use the database
87
-
88
- _model: Optional[FiatModel] = None
89
- config: Optional[FiatConfigModel] = None
90
- exe_path: Optional[os.PathLike] = None
91
- fiat_columns: FiatColumns
92
- impact_columns: FiatColumns
93
-
94
- def __init__(
95
- self,
96
- model_root: Path,
97
- config: Optional[FiatConfigModel] = None,
98
- exe_path: Optional[os.PathLike] = None,
99
- delete_crashed_runs: bool = True,
100
- config_base_path: Optional[os.PathLike] = None,
101
- ) -> None:
102
- # TODO should exe_path and delete_crashed_runs be part of the config?
103
- # Load FIAT template
104
- self.logger = FloodAdaptLogging.getLogger("FiatAdapter")
105
- self.config = config
106
- self.config_base_path = config_base_path
107
- self.exe_path = exe_path
108
- self.delete_crashed_runs = delete_crashed_runs
109
- self._model_root = str(model_root.resolve())
110
- self.fiat_columns = _FIAT_COLUMNS
111
- self.impact_columns = _IMPACT_COLUMNS # columns of FA impact output
112
-
113
- @property
114
- def model(self) -> FiatModel:
115
- """Lazily load and cache the FiatModel."""
116
- if self._model is None:
117
- self._model = FiatModel(root=self._model_root, mode="r")
118
- self._model.read()
119
- return self._model
120
-
121
- @property
122
- def model_root(self):
123
- return Path(self.model.root)
124
-
125
- @property
126
- def damage_types(self):
127
- """Get the damage types that are present in the exposure."""
128
- types = []
129
- for col in self.model.exposure.exposure_db.columns:
130
- if matches_pattern(col, self.fiat_columns.damage_function):
131
- name = extract_variables(col, self.fiat_columns.damage_function)["name"]
132
- types.append(name)
133
- return types
134
-
135
- def read(self, path: Path) -> None:
136
- """Read the fiat model from the current model root."""
137
- if Path(self.model.root).resolve() != Path(path).resolve():
138
- self.model.set_root(root=str(path), mode="r")
139
- self.model.read()
140
-
141
- def write(self, path_out: Union[str, os.PathLike], overwrite: bool = True) -> None:
142
- """Write the fiat model configuration to a directory."""
143
- if not isinstance(path_out, Path):
144
- path_out = Path(path_out).resolve()
145
-
146
- if not path_out.exists():
147
- path_out.mkdir(parents=True)
148
-
149
- write_mode = "w+" if overwrite else "w"
150
- with cd(path_out):
151
- self.model.set_root(root=str(path_out), mode=write_mode)
152
- self.model.write()
153
-
154
- def close_files(self):
155
- """Close all open files and clean up file handles."""
156
- if hasattr(self.logger, "handlers"):
157
- for handler in self.logger.handlers:
158
- if isinstance(handler, logging.FileHandler):
159
- handler.close()
160
- self.logger.removeHandler(handler)
161
-
162
- def __enter__(self) -> "FiatAdapter":
163
- return self
164
-
165
- def __exit__(self, exc_type, exc_value, traceback) -> bool:
166
- self.close_files()
167
- return False
168
-
169
- def has_run(self, scenario: Scenario) -> bool:
170
- # TODO this should include a check for all output files , and then maybe save them as output paths and types
171
- """
172
- Check if the impact results file for the given scenario exists.
173
-
174
- Parameters
175
- ----------
176
- scenario : Scenario
177
- The scenario for which to check the FIAT results.
178
-
179
- Returns
180
- -------
181
- bool
182
- True if the FIAT results file exists, False otherwise.
183
- """
184
- impacts_path = Impacts(scenario=scenario).impacts_path
185
-
186
- fiat_results_path = impacts_path.joinpath(
187
- f"Impacts_detailed_{scenario.name}.csv"
188
- )
189
- return fiat_results_path.exists()
190
-
191
- def delete_model(self):
192
- """
193
- Delete the Delft-FIAT simulation folder.
194
-
195
- This method attempts to delete the directory specified by `self.model_root`.
196
-
197
- Raises
198
- ------
199
- OSError: If the directory cannot be deleted.
200
- """
201
- self.logger.info("Deleting Delft-FIAT simulation folder")
202
- try:
203
- shutil.rmtree(self.model_root)
204
- except OSError as e_info:
205
- self.logger.warning(f"{e_info}\nCould not delete {self.model_root}.")
206
-
207
- def fiat_completed(self) -> bool:
208
- """Check if fiat has run as expected.
209
-
210
- Returns
211
- -------
212
- boolean
213
- True if fiat has run, False if something went wrong
214
- """
215
- log_file = self.model_root.joinpath(
216
- self.model.config["output"]["path"], "fiat.log"
217
- )
218
- if not log_file.exists():
219
- return False
220
- try:
221
- with open(log_file, "r", encoding="cp1252") as f:
222
- return "Geom calculation are done!" in f.read()
223
- except Exception as e:
224
- self.logger.error(f"Error while checking if FIAT has run: {e}")
225
- return False
226
-
227
- def preprocess(self, scenario: Scenario) -> None:
228
- """
229
- Preprocess the FIAT-model given a scenario by setting up projections, measures, and hazards, and then saves any changes made to disk.
230
-
231
- Args:
232
- scenario (Scenario): The scenario to preprocess, which includes projection,
233
- strategy, and hazard.
234
-
235
- Returns
236
- -------
237
- None
238
- """
239
- self.logger.info("Pre-processing Delft-FIAT model")
240
- # Projection
241
- projection = self.database.projections.get(scenario.projection)
242
- self.add_projection(projection)
243
-
244
- # Measures
245
- strategy = self.database.strategies.get(scenario.strategy)
246
- for measure in strategy.get_impact_measures():
247
- self.add_measure(measure)
248
-
249
- # Hazard
250
- floodmap = FloodMap(scenario.name)
251
- var = "risk_maps" if floodmap.mode == Mode.risk else "zsmax"
252
- is_risk = floodmap.mode == Mode.risk
253
- self.set_hazard(
254
- map_fn=floodmap.path,
255
- map_type=floodmap.type,
256
- var=var,
257
- is_risk=is_risk,
258
- units=us.UnitTypesLength.meters,
259
- )
260
-
261
- # Save any changes made to disk as well
262
- output_path = Impacts(scenario).impacts_path / "fiat_model"
263
- self.write(path_out=output_path)
264
-
265
- def run(self, scenario) -> None:
266
- """
267
- Execute the full process for a given scenario, including preprocessing, executing the simulation, and postprocessing steps.
268
-
269
- Args:
270
- scenario: An object containing the scenario data.
271
-
272
- Returns
273
- -------
274
- None
275
- """
276
- sim_path = Impacts(scenario=scenario).impacts_path / "fiat_model"
277
-
278
- self.preprocess(scenario)
279
- self.execute(sim_path)
280
- self.postprocess(scenario)
281
-
282
- def execute(
283
- self,
284
- path: Optional[os.PathLike] = None,
285
- exe_path: Optional[os.PathLike] = None,
286
- delete_crashed_runs: Optional[bool] = None,
287
- strict=True,
288
- ) -> bool:
289
- """
290
- Execute the FIAT model.
291
-
292
- Parameters
293
- ----------
294
- path : Optional[os.PathLike], optional
295
- The path to the model directory. If not provided, defaults to `self.model_root`.
296
- exe_path : Optional[os.PathLike], optional
297
- The path to the FIAT executable. If not provided, defaults to `self.exe_path`.
298
- delete_crashed_runs : Optional[bool], optional
299
- Whether to delete files from crashed runs. If not provided, defaults to `self.delete_crashed_runs`.
300
- strict : bool, optional
301
- Whether to raise an error if the FIAT model fails to run. Defaults to True.
302
-
303
- Returns
304
- -------
305
- bool
306
- True if the FIAT model run successfully, False otherwise.
307
-
308
- Raises
309
- ------
310
- ValueError
311
- If `exe_path` is not provided and `self.exe_path` is None.
312
- RuntimeError
313
- If the FIAT model fails to run and `strict` is True.
314
- """
315
- if path is None:
316
- path = self.model_root
317
- if exe_path is None:
318
- if self.exe_path is None:
319
- raise ValueError(
320
- "'exe_path' needs to be provided either when calling FiatAdapter.execute() or during initialization of the FiatAdapter object."
321
- )
322
- exe_path = self.exe_path
323
- if delete_crashed_runs is None:
324
- delete_crashed_runs = self.delete_crashed_runs
325
- path = Path(path)
326
- fiat_log = path / "fiat.log"
327
- with cd(path):
328
- with FloodAdaptLogging.to_file(file_path=fiat_log):
329
- FiatAdapter._ensure_correct_hash_spacing_in_csv(path)
330
-
331
- self.logger.info(f"Running FIAT in {path}")
332
- process = subprocess.run(
333
- args=[Path(exe_path).resolve().as_posix(), "run", "settings.toml"],
334
- stdout=subprocess.PIPE,
335
- stderr=subprocess.PIPE,
336
- text=True,
337
- )
338
- self.logger.debug(process.stdout)
339
-
340
- if process.returncode != 0:
341
- if delete_crashed_runs:
342
- # Remove all files in the simulation folder except for the log files
343
- for subdir, dirs, files in os.walk(path, topdown=False):
344
- for file in files:
345
- if not file.endswith(".log"):
346
- os.remove(os.path.join(subdir, file))
347
-
348
- if not os.listdir(subdir):
349
- os.rmdir(subdir)
350
-
351
- if strict:
352
- raise RuntimeError(f"FIAT model failed to run in {path}.")
353
- else:
354
- self.logger.error(f"FIAT model failed to run in {path}.")
355
-
356
- if process.returncode == 0:
357
- self.read_outputs()
358
-
359
- return process.returncode == 0
360
-
361
- def read_outputs(self) -> None:
362
- """
363
- Read the output FIAT CSV file specified in the model configuration and stores the data in the `outputs` attribute.
364
-
365
- Attributes
366
- ----------
367
- outputs : dict
368
- A dictionary containing the following keys:
369
- - "path" : Path
370
- The path to the output directory.
371
- - "table" : DataFrame
372
- The contents of the output CSV file.
373
- """
374
- # Get output path
375
- outputs_path = self.model_root.joinpath(self.model.config["output"]["path"])
376
-
377
- # Get all csvs and concatenate them in a single table
378
- csv_outputs_df = []
379
- for output_csv in self.model.config["output"]["csv"]:
380
- csv_path = outputs_path.joinpath(
381
- self.model.config["output"]["csv"][output_csv]
382
- )
383
- output_csv_df = pd.read_csv(csv_path)
384
- csv_outputs_df.append(output_csv_df)
385
- output_csv = pd.concat(csv_outputs_df)
386
-
387
- # Store them
388
- self.outputs = {}
389
- self.outputs["path"] = outputs_path
390
- self.outputs["table"] = output_csv
391
-
392
- def _get_aggr_ind(self, aggr_label: str):
393
- """
394
- Retrieve the index of the aggregation configuration that matches the given label.
395
-
396
- Parameters
397
- ----------
398
- aggr_label : str
399
- The label of the aggregation to find.
400
-
401
- Returns
402
- -------
403
- int
404
- The index of the aggregation configuration that matches the given label.
405
-
406
- Raises
407
- ------
408
- IndexError
409
- If no aggregation with the given label is found.
410
- """
411
- ind = [
412
- i
413
- for i, aggr in enumerate(self.config.aggregation)
414
- if aggr.name == aggr_label
415
- ][0]
416
-
417
- return ind
418
-
419
- def postprocess(self, scenario):
420
- """
421
- Post-process the results of the Delft-FIAT simulation for a given scenario.
422
-
423
- Parameters
424
- ----------
425
- scenario : Scenario
426
- The scenario object containing all relevant data and configurations.
427
-
428
- Raises
429
- ------
430
- RuntimeError
431
- If the Delft-FIAT simulation did not run successfully.
432
-
433
- Post-processing steps include:
434
- - Reading the outputs of the Delft-FIAT simulation.
435
- - Adding exceedance probabilities for risk mode scenarios.
436
- - Saving detailed impacts per object to a CSV file.
437
- - Creating infometrics files based on different metric configurations.
438
- - Generating infographic files if configured.
439
- - Calculating equity-based damages for risk mode scenarios.
440
- - Saving aggregated metrics to shapefiles.
441
- - Merging points data to building footprints.
442
- - Creating a roads spatial file if configured.
443
- - Deleting the simulation folder if the site configuration is set to not keep the simulation.
444
-
445
- Logging
446
- -------
447
- Logs the start and completion of the post-processing steps.
448
- """
449
- if not self.fiat_completed():
450
- raise RuntimeError("Delft-FIAT did not run successfully!")
451
-
452
- self.logger.info("Post-processing Delft-FIAT results")
453
-
454
- if not self.outputs:
455
- self.read_outputs()
456
- mode = self.database.events.get(scenario.event).mode
457
-
458
- # Define scenario output path
459
- impacts = Impacts(scenario=scenario)
460
- scenario_output_path = impacts.results_path
461
- impacts_output_path = impacts.impacts_path
462
-
463
- # Create column mapping to update column names
464
- name_translation = {}
465
- for col in self.outputs["table"].columns: # iterate through output columns
466
- for field in list(self.impact_columns.model_fields): # check for each field
467
- fiat_col = getattr(self.fiat_columns, field)
468
- if matches_pattern(col, fiat_col):
469
- impact_col = getattr(self.impact_columns, field)
470
- new_col = replace_pattern(col, fiat_col, impact_col)
471
- if (
472
- ".0Y" in new_col
473
- ): # TODO for now quick fix to account for float RP years, while metrics have integers
474
- new_col = new_col.replace(".0Y", "Y")
475
- name_translation[col] = new_col # save mapping
476
- self.name_mapping = name_translation
477
-
478
- # Rename save outputs
479
- self.outputs["table"] = self.outputs["table"].rename(columns=self.name_mapping)
480
-
481
- # Save impacts per object
482
- fiat_results_path = impacts_output_path.joinpath(
483
- f"Impacts_detailed_{scenario.name}.csv"
484
- )
485
- self.outputs["table"].to_csv(fiat_results_path, index=False)
486
-
487
- # Add exceedance probabilities if needed (only for risk)
488
- if mode == Mode.risk:
489
- # Get config path
490
- # TODO check where this configs should be read from
491
- config_path = self.database.static_path.joinpath(
492
- "templates", "infometrics", "metrics_additional_risk_configs.toml"
493
- )
494
- with open(config_path, mode="rb") as fp:
495
- config = tomli.load(fp)["flood_exceedance"]
496
- self.add_exceedance_probability(
497
- column=config[
498
- "column"
499
- ], # TODO check how to the correct version of column
500
- threshold=config["threshold"],
501
- period=config["period"],
502
- )
503
-
504
- # Create the infometrics files
505
- if mode == Mode.risk:
506
- ext = "_risk"
507
- else:
508
- ext = ""
509
-
510
- # Get options for metric configurations
511
- metric_types = ["mandatory", "additional"] # these are checked always
512
-
513
- if self.config.infographics: # if infographics are created
514
- metric_types += ["infographic"]
515
-
516
- metric_config_paths = [
517
- self.database.static_path.joinpath(
518
- "templates", "infometrics", f"{name}_metrics_config{ext}.toml"
519
- )
520
- for name in metric_types
521
- ]
522
-
523
- # Specify the metrics output path
524
- metrics_outputs_path = scenario_output_path.joinpath(
525
- f"Infometrics_{scenario.name}.csv"
526
- )
527
- self.create_infometrics(metric_config_paths, metrics_outputs_path)
528
-
529
- # Get paths of created aggregated infometrics
530
- aggr_metrics_paths = list(
531
- metrics_outputs_path.parent.glob(f"{metrics_outputs_path.stem}_*.csv")
532
- )
533
-
534
- # Create the infographic files
535
- if self.config.infographics:
536
- config_base_path = self.database.static_path.joinpath(
537
- "templates", "Infographics"
538
- )
539
- self.create_infographics(
540
- name=scenario.name,
541
- output_base_path=scenario_output_path,
542
- config_base_path=config_base_path,
543
- metrics_path=metrics_outputs_path,
544
- mode=mode,
545
- )
546
-
547
- # Calculate equity based damages
548
- if mode == Mode.risk:
549
- for file in aggr_metrics_paths:
550
- # Load metrics
551
- aggr_label = file.stem.split(f"{metrics_outputs_path.stem}_")[-1]
552
- self.add_equity(aggr_label=aggr_label, metrics_path=file)
553
-
554
- # Save aggregated metrics to shapefiles
555
- for file in aggr_metrics_paths:
556
- aggr_label = file.stem.split(f"{metrics_outputs_path.stem}_")[-1]
557
- output_path = impacts_output_path.joinpath(
558
- f"Impacts_aggregated_{scenario.name}_{aggr_label}.gpkg"
559
- )
560
- self.save_aggregation_spatial(
561
- aggr_label=aggr_label, metrics_path=file, output_path=output_path
562
- )
563
-
564
- # Merge points data to building footprints
565
- self.save_building_footprints(
566
- output_path=impacts_output_path.joinpath(
567
- f"Impacts_building_footprints_{scenario.name}.gpkg"
568
- )
569
- )
570
-
571
- # Create a roads spatial file
572
- if self.config.roads_file_name:
573
- self.save_roads(
574
- output_path=impacts_output_path.joinpath(
575
- f"Impacts_roads_{scenario.name}.gpkg"
576
- )
577
- )
578
-
579
- self.logger.info("Delft-FIAT post-processing complete!")
580
-
581
- # If site config is set to not keep FIAT simulation, delete folder
582
- if not self.config.save_simulation:
583
- self.delete_model()
584
-
585
- def add_measure(self, measure: Measure):
586
- """
587
- Add and apply a specific impact measure to the properties of the FIAT model.
588
-
589
- Parameters
590
- ----------
591
- measure : Measure
592
- The impact measure to be applied. It can be of type Elevate, FloodProof, or Buyout.
593
-
594
- Notes
595
- -----
596
- The method logs the application of the measure and calls the appropriate method based on the measure type:
597
- - Elevate: Calls elevate_properties(measure)
598
- - FloodProof: Calls floodproof_properties(measure)
599
- - Buyout: Calls buyout_properties(measure)
600
-
601
- If the measure type is unsupported, a warning is logged.
602
- """
603
- self.logger.info(f"Applying impact measure '{measure.name}'")
604
- if isinstance(measure, Elevate):
605
- self.elevate_properties(measure)
606
- elif isinstance(measure, FloodProof):
607
- self.floodproof_properties(measure)
608
- elif isinstance(measure, Buyout):
609
- self.buyout_properties(measure)
610
- else:
611
- self.logger.warning(
612
- f"Skipping unsupported measure type {measure.__class__.__name__}"
613
- )
614
-
615
- def add_projection(self, projection: Projection):
616
- """
617
- Add the socioeconomic changes part of a projection to the FIAT model.
618
-
619
- Parameters
620
- ----------
621
- projection : Projection
622
- The projection object containing socioeconomic changes to be applied.
623
-
624
- Notes
625
- -----
626
- - Economic growth is applied to all existing buildings if specified.
627
- - New population growth areas are added if specified, taking into account
628
- economic growth.
629
- - Population growth is applied to existing objects if specified.
630
- """
631
- self.logger.info(
632
- f"Applying socioeconomic changes from projection '{projection.name}'"
633
- )
634
- socio_economic_change = projection.socio_economic_change
635
-
636
- ids_all_buildings = self.get_all_building_ids()
637
-
638
- # Implement socioeconomic changes if needed
639
- # First apply economic growth to existing objects
640
- if not math.isclose(socio_economic_change.economic_growth, 0, abs_tol=1e-6):
641
- self.apply_economic_growth(
642
- economic_growth=socio_economic_change.economic_growth,
643
- ids=ids_all_buildings, #
644
- )
645
-
646
- # Then the new population growth area is added if provided
647
- # In the new areas, the economic growth is taken into account!
648
- # Order matters since for the pop growth new, we only want the economic growth!
649
- if not math.isclose(
650
- socio_economic_change.population_growth_new, 0, abs_tol=1e-6
651
- ):
652
- # Get path of new development area geometry
653
- area_path = resolve_filepath(
654
- object_dir=ObjectDir.projection,
655
- obj_name=projection.name,
656
- path=socio_economic_change.new_development_shapefile,
657
- )
658
-
659
- # Get DEM location for assigning elevation to new areas
660
- dem = (
661
- self.database.static_path
662
- / "dem"
663
- / self.database.site.sfincs.dem.filename
664
- )
665
- # Call adapter method to add the new areas
666
- self.apply_population_growth_new(
667
- population_growth=socio_economic_change.population_growth_new,
668
- ground_floor_height=socio_economic_change.new_development_elevation.value,
669
- elevation_type=socio_economic_change.new_development_elevation.type,
670
- area_path=area_path,
671
- ground_elevation=dem,
672
- )
673
-
674
- # Then apply population growth to existing objects
675
- if not math.isclose(
676
- socio_economic_change.population_growth_existing, 0, abs_tol=1e-6
677
- ):
678
- self.apply_population_growth_existing(
679
- population_growth=socio_economic_change.population_growth_existing,
680
- ids=ids_all_buildings,
681
- )
682
-
683
- def set_hazard(
684
- self,
685
- map_fn: Union[os.PathLike, list[os.PathLike]],
686
- map_type: FloodmapType,
687
- var: str,
688
- is_risk: bool = False,
689
- units: str = us.UnitTypesLength.meters,
690
- ) -> None:
691
- """
692
- Set the hazard map and type for the FIAT model.
693
-
694
- Parameters
695
- ----------
696
- map_fn : str
697
- The filename of the hazard map.
698
- map_type : FloodmapType
699
- The type of the flood map.
700
- var : str
701
- The variable name in the hazard map.
702
- is_risk : bool, optional
703
- Flag indicating if the map is a risk output. Defaults to False.
704
- units : str, optional
705
- The units of the hazard map. Defaults to us.UnitTypesLength.meters.
706
- """
707
- self.logger.info(f"Setting hazard to the {map_type} map {map_fn}")
708
- # Add the floodmap data to a data catalog with the unit conversion
709
- wl_current_units = us.UnitfulLength(value=1.0, units=units)
710
- conversion_factor = wl_current_units.convert(self.model.exposure.unit)
711
-
712
- self.model.setup_hazard(
713
- map_fn=map_fn,
714
- map_type=map_type,
715
- rp=None,
716
- crs=None, # change this in new version (maybe to str(floodmap.crs.split(':')[1]))
717
- nodata=-999, # change this in new version
718
- var=var,
719
- chunks="auto",
720
- risk_output=is_risk,
721
- unit_conversion_factor=conversion_factor,
722
- )
723
-
724
- # PROJECTIONS
725
-
726
- def apply_economic_growth(
727
- self, economic_growth: float, ids: Optional[list] = None
728
- ) -> None:
729
- """
730
- Apply economic growth to the FIAT-Model by adjusting the maximum potential damage values in the exposure database.
731
-
732
- This method updates the max potential damage values in the exposure database by
733
- applying a given economic growth rate. It can optionally filter the updates to
734
- specific objects identified by their IDs.
735
-
736
- Parameters
737
- ----------
738
- economic_growth : float
739
- The economic growth rate to apply, expressed as a percentage.
740
- ids : Optional[list], default=None
741
- A list of object IDs to which the economic growth should be applied. If None, the growth is applied to all buildings.
742
- """
743
- self.logger.info(f"Applying economic growth of {economic_growth} %.")
744
- # Get columns that include max damage
745
- damage_cols = [
746
- c
747
- for c in self.model.exposure.exposure_db.columns
748
- if matches_pattern(c, self.fiat_columns.max_potential_damage)
749
- ]
750
-
751
- # Get objects that are buildings (using site info)
752
- buildings_rows = ~self.model.exposure.exposure_db[
753
- self.fiat_columns.primary_object_type
754
- ].isin(self.config.non_building_names)
755
-
756
- # If ids are given use that as an additional filter
757
- if ids:
758
- buildings_rows = buildings_rows & self.model.exposure.exposure_db[
759
- self.fiat_columns.object_id
760
- ].isin(ids)
761
-
762
- # Update columns using economic growth value
763
- updated_max_pot_damage = self.model.exposure.exposure_db.copy()
764
- updated_max_pot_damage.loc[buildings_rows, damage_cols] *= (
765
- 1.0 + economic_growth / 100.0
766
- )
767
-
768
- # update fiat model
769
- self.model.exposure.update_max_potential_damage(
770
- updated_max_potential_damages=updated_max_pot_damage
771
- )
772
-
773
- def apply_population_growth_existing(
774
- self, population_growth: float, ids: Optional[list[str]] = None
775
- ) -> None:
776
- """
777
- Apply population growth to the FIAT-Model by adjusting the existing max potential damage values for buildings.
778
-
779
- This method updates the max potential damage values in the exposure database by
780
- applying a given population growth rate. It can optionally filter the updates to
781
- specific objects identified by their IDs.
782
-
783
- Parameters
784
- ----------
785
- population_growth : float
786
- The population growth rate as a percentage.
787
- ids : Optional[list[str]]
788
- A list of object IDs to filter the updates. If None, the updates are applied to all buildings.
789
- """
790
- self.logger.info(f"Applying population growth of {population_growth} %.")
791
- # Get columns that include max damage
792
- damage_cols = [
793
- c
794
- for c in self.model.exposure.exposure_db.columns
795
- if matches_pattern(c, self.fiat_columns.max_potential_damage)
796
- ]
797
-
798
- # Get objects that are buildings (using site info)
799
- buildings_rows = ~self.model.exposure.exposure_db[
800
- self.fiat_columns.primary_object_type
801
- ].isin(self.config.non_building_names)
802
-
803
- # If ids are given use that as an additional filter
804
- if ids:
805
- buildings_rows = buildings_rows & self.model.exposure.exposure_db[
806
- self.fiat_columns.object_id
807
- ].isin(ids)
808
-
809
- # Update columns using economic growth value
810
- updated_max_pot_damage = self.model.exposure.exposure_db.copy()
811
- updated_max_pot_damage.loc[buildings_rows, damage_cols] *= (
812
- 1.0 + population_growth / 100.0
813
- )
814
-
815
- # update fiat model
816
- self.model.exposure.update_max_potential_damage(
817
- updated_max_potential_damages=updated_max_pot_damage
818
- )
819
-
820
- def apply_population_growth_new(
821
- self,
822
- population_growth: float,
823
- ground_floor_height: float,
824
- elevation_type: str,
825
- area_path: str,
826
- ground_elevation: Union[None, str, Path] = None,
827
- ) -> None:
828
- """
829
- Apply population growth in a new area by adding new objects in the model.
830
-
831
- Parameters
832
- ----------
833
- population_growth : float
834
- The percentage of population growth to apply.
835
- ground_floor_height : float
836
- The height of the ground floor.
837
- elevation_type : str
838
- The type of elevation reference to use. Must be either 'floodmap' or 'datum'.
839
- area_path : str
840
- The path to the area file.
841
- ground_elevation : Union[None, str, Path], optional
842
- The ground elevation reference. Default is None.
843
-
844
- Raises
845
- ------
846
- ValueError
847
- If `elevation_type` is 'floodmap' and base flood elevation (bfe) map is not provided.
848
- If `elevation_type` is not 'floodmap' or 'datum'.
849
- """
850
- self.logger.info(
851
- f"Applying population growth of {population_growth} %, by creating a new development area using the geometries from {area_path} and a ground floor height of {ground_floor_height} {self.model.exposure.unit} above '{elevation_type}'."
852
- )
853
- # Get reference type to align with hydromt
854
- if elevation_type == "floodmap":
855
- if not self.config.bfe:
856
- raise ValueError(
857
- "Base flood elevation (bfe) map is required to use 'floodmap' as reference."
858
- )
859
- kwargs = {
860
- "elevation_reference": "geom",
861
- "path_ref": self.database.static_path.joinpath(self.config.bfe.geom),
862
- "attr_ref": self.config.bfe.field_name,
863
- }
864
- elif elevation_type == "datum":
865
- kwargs = {"elevation_reference": "datum"}
866
- else:
867
- raise ValueError("elevation type can only be one of 'floodmap' or 'datum'")
868
- # Get aggregation areas info
869
- aggregation_areas = [
870
- self.database.static_path.joinpath(aggr.file)
871
- for aggr in self.config.aggregation
872
- ]
873
- attribute_names = [aggr.field_name for aggr in self.config.aggregation]
874
- label_names = [
875
- self.fiat_columns.aggregation_label.format(name=aggr.name)
876
- for aggr in self.config.aggregation
877
- ]
878
- new_dev_geom_name = Path(self.config.new_development_file_name).stem
879
- # Use hydromt function
880
- self.model.exposure.setup_new_composite_areas(
881
- percent_growth=population_growth,
882
- geom_file=Path(area_path),
883
- ground_floor_height=ground_floor_height,
884
- damage_types=self.damage_types,
885
- vulnerability=self.model.vulnerability,
886
- ground_elevation=ground_elevation,
887
- aggregation_area_fn=aggregation_areas,
888
- attribute_names=attribute_names,
889
- label_names=label_names,
890
- geom_name=new_dev_geom_name,
891
- **kwargs,
892
- )
893
-
894
- # MEASURES
895
- @staticmethod
896
- def _get_area_name(measure: Measure):
897
- """
898
- Determine the area name based on the selection type of the measure.
899
-
900
- Parameters
901
- ----------
902
- measure : Measure
903
- An instance of Measure containing attributes that define the selection type and area.
904
-
905
- Returns
906
- -------
907
- str
908
- The name of the area. It returns the aggregation area name if the selection type is "aggregation_area",
909
- the polygon file name if the selection type is "polygon", and "all" for any other selection type.
910
- """
911
- if measure.selection_type == "aggregation_area":
912
- area = measure.aggregation_area_name
913
- elif measure.selection_type == "polygon":
914
- area = measure.polygon_file
915
- else:
916
- area = "all"
917
- return area
918
-
919
- def elevate_properties(self, elevate: Elevate) -> None:
920
- """
921
- Elevate the ground floor height of properties based on the provided Elevate measure.
922
-
923
- Parameters
924
- ----------
925
- elevate : Elevate
926
- The Elevate measure containing the elevation details.
927
-
928
- Raises
929
- ------
930
- ValueError
931
- If the elevation type is 'floodmap' and the base flood elevation (bfe) map is not provided.
932
- If the elevation type is not 'floodmap' or 'datum'.
933
- """
934
- area = self._get_area_name(elevate)
935
- self.logger.info(
936
- f"Elevating '{elevate.property_type}' type properties in '{area}' by {elevate.elevation} relative to '{elevate.elevation.type}'."
937
- )
938
- # If ids are given use that as an additional filter
939
- objectids = self.get_object_ids(elevate)
940
-
941
- # Get reference type to align with hydromt
942
- if elevate.elevation.type == "floodmap":
943
- if not self.config.bfe:
944
- raise ValueError(
945
- "Base flood elevation (bfe) map is required to use 'floodmap' as reference."
946
- )
947
- if self.config.bfe.table:
948
- path_ref = self.config_base_path.joinpath(self.config.bfe.table)
949
- height_reference = "table"
950
- else:
951
- path_ref = self.config_base_path.joinpath(self.config.bfe.geom)
952
- height_reference = "geom"
953
- # Use hydromt function
954
- self.model.exposure.raise_ground_floor_height(
955
- raise_by=elevate.elevation.value,
956
- objectids=objectids,
957
- height_reference=height_reference,
958
- path_ref=path_ref,
959
- attr_ref=self.config.bfe.field_name,
960
- )
961
-
962
- elif elevate.elevation.type == "datum":
963
- # Use hydromt function
964
- self.model.exposure.raise_ground_floor_height(
965
- raise_by=elevate.elevation.value,
966
- objectids=objectids,
967
- height_reference="datum",
968
- )
969
- else:
970
- raise ValueError("elevation type can only be one of 'floodmap' or 'datum'")
971
-
972
- def buyout_properties(self, buyout: Buyout) -> None:
973
- """
974
- Apply the buyout measure to the properties by setting their maximum potential damage to zero.
975
-
976
- Parameters
977
- ----------
978
- buyout : Buyout
979
- The Buyout measure containing the details of the properties to be bought out.
980
-
981
- """
982
- area = self._get_area_name(buyout)
983
- self.logger.info(
984
- f"Buying-out '{buyout.property_type}' type properties in '{area}'."
985
- )
986
- # Get columns that include max damage
987
- damage_cols = [
988
- c
989
- for c in self.model.exposure.exposure_db.columns
990
- if matches_pattern(c, self.fiat_columns.max_potential_damage)
991
- ]
992
-
993
- # Get objects that are buildings (using site info)
994
- buildings_rows = ~self.model.exposure.exposure_db[
995
- self.fiat_columns.primary_object_type
996
- ].isin(self.config.non_building_names)
997
-
998
- # Get rows that are affected
999
- objectids = self.get_object_ids(buyout)
1000
- rows = (
1001
- self.model.exposure.exposure_db[self.fiat_columns.object_id].isin(objectids)
1002
- & buildings_rows
1003
- )
1004
-
1005
- # Update columns
1006
- updated_max_pot_damage = self.model.exposure.exposure_db.copy()
1007
- updated_max_pot_damage.loc[rows, damage_cols] *= 0
1008
-
1009
- # update fiat model
1010
- self.model.exposure.update_max_potential_damage(
1011
- updated_max_potential_damages=updated_max_pot_damage
1012
- )
1013
-
1014
- def floodproof_properties(self, floodproof: FloodProof) -> None:
1015
- """
1016
- Apply floodproofing measures to the properties by truncating the damage function.
1017
-
1018
- Parameters
1019
- ----------
1020
- floodproof : FloodProof
1021
- The FloodProof measure containing the details of the properties to be floodproofed.
1022
- """
1023
- area = self._get_area_name(floodproof)
1024
- self.logger.info(
1025
- f"Flood-proofing '{floodproof.property_type}' type properties in '{area}' by {floodproof.elevation}."
1026
- )
1027
- # If ids are given use that as an additional filter
1028
- objectids = self.get_object_ids(floodproof)
1029
-
1030
- # Use hydromt function
1031
- self.model.exposure.truncate_damage_function(
1032
- objectids=objectids,
1033
- floodproof_to=floodproof.elevation.value,
1034
- damage_function_types=self.damage_types,
1035
- vulnerability=self.model.vulnerability,
1036
- )
1037
-
1038
- # STATIC METHODS
1039
-
1040
- def get_buildings(self) -> gpd.GeoDataFrame:
1041
- """
1042
- Retrieve the building geometries from the FIAT model's exposure database.
1043
-
1044
- Returns
1045
- -------
1046
- gpd.GeoDataFrame
1047
- A GeoDataFrame containing the geometries of all buildings in the FIAT model.
1048
-
1049
- Raises
1050
- ------
1051
- ValueError
1052
- If the FIAT model does not have an exposure database initialized.
1053
- """
1054
- if self.model.exposure is None:
1055
- raise ValueError(
1056
- "FIAT model does not have exposure, make sure your model has been initialized."
1057
- )
1058
- gdf_0 = self.model.exposure.select_objects(
1059
- primary_object_type="ALL",
1060
- non_building_names=self.config.non_building_names,
1061
- return_gdf=True,
1062
- )
1063
- # Rename columns
1064
- name_translation = {}
1065
- for col in gdf_0.columns: # iterate through output columns
1066
- for field in list(self.impact_columns.model_fields): # check for each field
1067
- fiat_col = getattr(self.fiat_columns, field)
1068
- if matches_pattern(col, fiat_col):
1069
- impact_col = getattr(self.impact_columns, field)
1070
- new_col = replace_pattern(col, fiat_col, impact_col)
1071
- name_translation[col] = new_col # save mapping
1072
- gdf = gdf_0.rename(columns=name_translation)
1073
- return gdf
1074
-
1075
- def get_property_types(self) -> list:
1076
- """
1077
- Retrieve the list of property types from the FIAT model's exposure database.
1078
-
1079
- Returns
1080
- -------
1081
- list
1082
- A list of property types available in the FIAT model.
1083
-
1084
- Raises
1085
- ------
1086
- ValueError
1087
- If no property types are found in the FIAT model.
1088
- """
1089
- types = self.model.exposure.get_primary_object_type()
1090
- if types is None:
1091
- raise ValueError("No property types found in the FIAT model.")
1092
- types.append("all") # Add "all" type for using as identifier
1093
-
1094
- names = self.config.non_building_names
1095
- if names:
1096
- for name in names:
1097
- if name in types:
1098
- types.remove(name)
1099
-
1100
- return types
1101
-
1102
- def get_all_building_ids(self):
1103
- """
1104
- Retrieve the IDs of all buildings in the FIAT model.
1105
-
1106
- Returns
1107
- -------
1108
- list
1109
- A list of IDs for all buildings in the FIAT model.
1110
- """
1111
- # Get ids of existing buildings
1112
- ids = self.model.exposure.get_object_ids(
1113
- "all", non_building_names=self.config.non_building_names
1114
- )
1115
- return ids
1116
-
1117
- def get_object_ids(self, measure: Measure) -> list[Any]:
1118
- """
1119
- Retrieve the object IDs for a given impact measure.
1120
-
1121
- Parameters
1122
- ----------
1123
- measure : Measure
1124
- The impact measure for which to retrieve object IDs.
1125
-
1126
- Returns
1127
- -------
1128
- list[Any]
1129
- A list of object IDs that match the criteria of the given measure.
1130
-
1131
- Raises
1132
- ------
1133
- ValueError
1134
- If the measure type is not an impact measure.
1135
- """
1136
- if not MeasureType.is_impact(measure.type):
1137
- raise ValueError(
1138
- f"Measure type {measure.type} is not an impact measure. "
1139
- "Can only retrieve object ids for impact measures."
1140
- )
1141
-
1142
- # check if polygon file is used, then get the absolute path
1143
- if measure.polygon_file:
1144
- polygon_file = resolve_filepath(
1145
- object_dir=ObjectDir.measure,
1146
- obj_name=measure.name,
1147
- path=measure.polygon_file,
1148
- )
1149
- else:
1150
- polygon_file = None
1151
-
1152
- # use the hydromt-fiat method to the ids
1153
- ids = self.model.exposure.get_object_ids(
1154
- selection_type=measure.selection_type,
1155
- property_type=measure.property_type,
1156
- non_building_names=self.config.non_building_names,
1157
- aggregation=measure.aggregation_area_type,
1158
- aggregation_area_name=measure.aggregation_area_name,
1159
- polygon_file=str(polygon_file),
1160
- )
1161
-
1162
- return ids
1163
-
1164
- # POST-PROCESSING METHODS
1165
-
1166
- def add_exceedance_probability(
1167
- self, column: str, threshold: float, period: int
1168
- ) -> pd.DataFrame:
1169
- """Calculate exceedance probabilities and append them to the results table.
1170
-
1171
- Parameters
1172
- ----------
1173
- column : str
1174
- The name of the column to calculate exceedance probabilities for.
1175
- threshold : float
1176
- The threshold value for exceedance probability calculation.
1177
- period : int
1178
- The return period for exceedance probability calculation.
1179
-
1180
- Returns
1181
- -------
1182
- pd.DataFrame
1183
- The updated results table with exceedance probabilities appended.
1184
- """
1185
- self.logger.info("Calculating exceedance probabilities")
1186
- fiat_results_df = ExceedanceProbabilityCalculator(column).append_probability(
1187
- self.outputs["table"], threshold, period
1188
- )
1189
- self.outputs["table"] = fiat_results_df
1190
- return self.outputs["table"]
1191
-
1192
- def create_infometrics(
1193
- self, metric_config_paths: list[os.PathLike], metrics_output_path: os.PathLike
1194
- ) -> None:
1195
- """
1196
- Create infometrics files based on the provided metric configuration paths.
1197
-
1198
- Parameters
1199
- ----------
1200
- metric_config_paths : list[os.PathLike]
1201
- A list of paths to the metric configuration files.
1202
- metrics_output_path : os.PathLike
1203
- The path where the metrics output file will be saved.
1204
-
1205
- Raises
1206
- ------
1207
- FileNotFoundError
1208
- If a mandatory metric configuration file does not exist.
1209
- """
1210
- # Get the metrics configuration
1211
- self.logger.info("Calculating infometrics")
1212
-
1213
- # Write the metrics to file
1214
- # Check if type of metric configuration is available
1215
- for metric_file in metric_config_paths:
1216
- if metric_file.exists():
1217
- metrics_writer = MetricsFileWriter(
1218
- metric_file,
1219
- aggregation_label_fmt=self.impact_columns.aggregation_label,
1220
- )
1221
-
1222
- metrics_writer.parse_metrics_to_file(
1223
- df_results=self.outputs["table"],
1224
- metrics_path=metrics_output_path,
1225
- write_aggregate=None,
1226
- )
1227
-
1228
- metrics_writer.parse_metrics_to_file(
1229
- df_results=self.outputs["table"],
1230
- metrics_path=metrics_output_path,
1231
- write_aggregate="all",
1232
- )
1233
- else:
1234
- if "mandatory" in metric_file.name.lower():
1235
- raise FileNotFoundError(
1236
- f"Mandatory metric configuration file {metric_file} does not exist!"
1237
- )
1238
-
1239
- def create_infographics(
1240
- self,
1241
- name: str,
1242
- output_base_path: os.PathLike,
1243
- config_base_path: os.PathLike,
1244
- metrics_path: os.PathLike,
1245
- mode: Mode = Mode.single_event,
1246
- ):
1247
- """Create infographic files based on the provided metrics and configuration.
1248
-
1249
- Parameters
1250
- ----------
1251
- name : str
1252
- The name of the scenario.
1253
- output_base_path : os.PathLike
1254
- The base path where the output files will be saved.
1255
- config_base_path : os.PathLike
1256
- The base path where the configuration files are located.
1257
- metrics_path : os.PathLike
1258
- The path to the metrics file.
1259
- mode : Mode, optional
1260
- The mode of the infographic, by default Mode.single_event.
1261
- """
1262
- self.logger.info("Creating infographics")
1263
-
1264
- # Check if infographics config file exists
1265
- if mode == Mode.risk:
1266
- config_path = config_base_path.joinpath("config_risk_charts.toml")
1267
- if not config_path.exists():
1268
- self.logger.warning(
1269
- "Risk infographic cannot be created, since 'config_risk_charts.toml' is not available"
1270
- )
1271
- return
1272
-
1273
- # Get the infographic
1274
- InforgraphicFactory.create_infographic_file_writer(
1275
- infographic_mode=mode,
1276
- scenario_name=name,
1277
- metrics_full_path=metrics_path,
1278
- config_base_path=config_base_path,
1279
- output_base_path=output_base_path,
1280
- ).write_infographics_to_file()
1281
-
1282
- def add_equity(
1283
- self,
1284
- aggr_label: str,
1285
- metrics_path: os.PathLike,
1286
- damage_column_pattern: str = "TotalDamageRP{rp}",
1287
- gamma: float = 1.2,
1288
- ):
1289
- """Calculate equity-based damages for a given aggregation label.
1290
-
1291
- Parameters
1292
- ----------
1293
- aggr_label : str
1294
- The label of the aggregation area.
1295
- metrics_path : os.PathLike
1296
- The path to the metrics file.
1297
- damage_column_pattern : str, optional
1298
- The pattern for the damage column names, by default "TotalDamageRP{rp}".
1299
- gamma : float, optional
1300
- The equity weight parameter, by default 1.2
1301
- """
1302
- # TODO gamma in configuration file?
1303
-
1304
- ind = self._get_aggr_ind(aggr_label)
1305
- # TODO check what happens if aggr_label not in config
1306
-
1307
- if self.config.aggregation[ind].equity is None:
1308
- self.logger.warning(
1309
- f"Cannot calculate equity weighted risk for aggregation label: {aggr_label}, because equity inputs are not available."
1310
- )
1311
- return
1312
-
1313
- self.logger.info(
1314
- f"Calculating equity weighted risk for aggregation label: {aggr_label} "
1315
- )
1316
- metrics = pd.read_csv(metrics_path)
1317
- # Create Equity object
1318
- equity = Equity(
1319
- census_table=self.config_base_path.joinpath(
1320
- self.config.aggregation[ind].equity.census_data
1321
- ),
1322
- damages_table=metrics,
1323
- aggregation_label=self.config.aggregation[ind].field_name,
1324
- percapitaincome_label=self.config.aggregation[
1325
- ind
1326
- ].equity.percapitaincome_label,
1327
- totalpopulation_label=self.config.aggregation[
1328
- ind
1329
- ].equity.totalpopulation_label,
1330
- damage_column_pattern=damage_column_pattern,
1331
- )
1332
- # Calculate equity
1333
- df_equity = equity.equity_calculation(gamma)
1334
- # Merge with metrics tables and resave
1335
- metrics_new = metrics.merge(
1336
- df_equity,
1337
- left_on=metrics.columns[0],
1338
- right_on=self.config.aggregation[ind].field_name,
1339
- how="left",
1340
- )
1341
- del metrics_new[self.config.aggregation[ind].field_name]
1342
- metrics_new = metrics_new.set_index(metrics_new.columns[0])
1343
- metrics_new.loc["Description", ["EW", "EWEAD", "EWCEAD"]] = [
1344
- "Equity weight",
1345
- "Equity weighted expected annual damage",
1346
- "Equity weighted certainty equivalent annual damage",
1347
- ]
1348
- metrics_new.loc["Show In Metrics Table", ["EW", "EWEAD", "EWCEAD"]] = [
1349
- True,
1350
- True,
1351
- True,
1352
- ]
1353
- metrics_new.loc["Long Name", ["EW", "EWEAD", "EWCEAD"]] = [
1354
- "Equity weight",
1355
- "Equity weighted expected annual damage",
1356
- "Equity weighted certainty equivalent annual damage",
1357
- ]
1358
- metrics_new.index.name = None
1359
- metrics_new.to_csv(metrics_path)
1360
-
1361
- def save_aggregation_spatial(
1362
- self, aggr_label: str, metrics_path: os.PathLike, output_path: os.PathLike
1363
- ):
1364
- """
1365
- Save aggregated metrics to a spatial file.
1366
-
1367
- Parameters
1368
- ----------
1369
- aggr_label : str
1370
- The label of the aggregation area.
1371
- metrics_path : os.PathLike
1372
- The path to the metrics file.
1373
- output_path : os.PathLike
1374
- The path where the output spatial file will be saved.
1375
- """
1376
- self.logger.info(f"Saving impacts for aggregation areas type: '{aggr_label}'")
1377
-
1378
- metrics = pd.read_csv(metrics_path)
1379
-
1380
- # Load aggregation areas
1381
- ind = self._get_aggr_ind(aggr_label)
1382
-
1383
- aggr_areas_path = self.config_base_path.joinpath(
1384
- self.config.aggregation[ind].file
1385
- )
1386
-
1387
- aggr_areas = gpd.read_file(aggr_areas_path, engine="pyogrio")
1388
-
1389
- # Save file
1390
- AggregationAreas.write_spatial_file(
1391
- metrics,
1392
- aggr_areas,
1393
- output_path,
1394
- id_name=self.config.aggregation[ind].field_name,
1395
- file_format="geopackage",
1396
- )
1397
-
1398
- def save_building_footprints(self, output_path: os.PathLike):
1399
- """
1400
- Aggregate impacts at a building footprint level and then saves to an output file.
1401
-
1402
- Parameters
1403
- ----------
1404
- output_path : os.PathLike
1405
- The path where the output spatial file will be saved.
1406
-
1407
- Raises
1408
- ------
1409
- ValueError
1410
- If no building footprints are provided in the configuration.
1411
- """
1412
- self.logger.info("Calculating impacts at a building footprint scale")
1413
-
1414
- # Read the existing building points
1415
- buildings = self.model.exposure.select_objects(
1416
- primary_object_type="ALL",
1417
- non_building_names=self.config.non_building_names,
1418
- return_gdf=True,
1419
- )
1420
-
1421
- # Change names
1422
- buildings = buildings[[self.fiat_columns.object_id, "geometry"]]
1423
- buildings = buildings.rename(
1424
- columns={self.fiat_columns.object_id: self.impact_columns.object_id}
1425
- )
1426
-
1427
- # Get all results per building
1428
- fiat_results_df = gpd.GeoDataFrame(
1429
- self.outputs["table"].merge(
1430
- buildings,
1431
- on=self.impact_columns.object_id,
1432
- how="inner",
1433
- )
1434
- )
1435
-
1436
- # Check which footprint case we have
1437
- # If FIAT has points and external footprints are provided
1438
- if self.config.building_footprints:
1439
- method = "external_footprints"
1440
- # Get footprints file
1441
- footprints_path = self.config_base_path.joinpath(
1442
- self.config.building_footprints
1443
- )
1444
- # Read building footprints
1445
- footprints_gdf = gpd.read_file(footprints_path, engine="pyogrio")
1446
- field_name = "BF_FID"
1447
- # If FIAT has footprints already
1448
- elif all(buildings.geometry.geom_type.isin(["Polygon", "MultiPolygon"])):
1449
- method = "internal_footprints"
1450
- footprints_gdf = buildings[[self.impact_columns.object_id, "geometry"]]
1451
- field_name = self.impact_columns.object_id
1452
- # If FIAT has points and no external footprints are available
1453
- else:
1454
- method = "no_footprints"
1455
-
1456
- # Based on case follow different workflow
1457
- if method in ["external_footprints", "internal_footprints"]:
1458
- footprints = Footprints(
1459
- footprints=footprints_gdf,
1460
- fiat_columns=self.impact_columns,
1461
- field_name=field_name,
1462
- )
1463
- footprints.aggregate(fiat_results_df)
1464
- elif method == "no_footprints":
1465
- footprints = Footprints(fiat_columns=self.impact_columns)
1466
- footprints.set_point_data(fiat_results_df)
1467
-
1468
- # Normalize damages
1469
- footprints.calc_normalized_damages()
1470
-
1471
- # Save footprint
1472
- footprints.write(output_path)
1473
-
1474
- def save_roads(self, output_path: os.PathLike):
1475
- """
1476
- Save the impacts on roads to a spatial file.
1477
-
1478
- Parameters
1479
- ----------
1480
- output_path : os.PathLike
1481
- The path where the output spatial file will be saved.
1482
- """
1483
- self.logger.info("Calculating road impacts")
1484
- # Read roads spatial file
1485
- roads = gpd.read_file(
1486
- self.outputs["path"].joinpath(self.config.roads_file_name)
1487
- )
1488
- roads = roads.rename(columns=self.name_mapping)
1489
- # Get columns to use
1490
- aggr_cols = [
1491
- name
1492
- for name in self.outputs["table"].columns
1493
- if self.impact_columns.aggregation_label in name
1494
- ]
1495
- inun_cols = [
1496
- name
1497
- for name in roads.columns
1498
- if self.impact_columns.inundation_depth in name
1499
- ]
1500
- # Merge data
1501
- roads = roads[[self.impact_columns.object_id, "geometry"] + inun_cols].merge(
1502
- self.outputs["table"][
1503
- [self.impact_columns.object_id, self.impact_columns.primary_object_type]
1504
- + aggr_cols
1505
- ],
1506
- on=self.impact_columns.object_id,
1507
- )
1508
- # Save as geopackage
1509
- roads.to_file(output_path, driver="GPKG")
1510
-
1511
- @staticmethod
1512
- def _ensure_correct_hash_spacing_in_csv(
1513
- model_root: Path, hash_spacing: int = 1
1514
- ) -> None:
1515
- """
1516
- Ensure that the CSV file has the correct number of spaces between hashes.
1517
-
1518
- When writing csv files, FIAT does not add spaces between the hashes and the line, which leads to errors on linux.
1519
-
1520
-
1521
- Parameters
1522
- ----------
1523
- file_path : Path
1524
- The path to the model root.
1525
- hash_spacing : int, optional
1526
- The number of spaces between hashes, by default 1.
1527
- """
1528
- for dirpath, _, filenames in os.walk(model_root):
1529
- for filename in filenames:
1530
- if not filename.lower().endswith(".csv"):
1531
- continue
1532
- file_path = os.path.join(dirpath, filename)
1533
-
1534
- with open(file_path, "r") as file:
1535
- lines = file.readlines()
1536
-
1537
- with open(file_path, "w") as file:
1538
- for line in lines:
1539
- if line.startswith("#"):
1540
- line = "#" + " " * hash_spacing + line.lstrip("#")
1541
- file.write(line)
1
+ import logging
2
+ import math
3
+ import os
4
+ import shutil
5
+ import subprocess
6
+ from pathlib import Path
7
+ from typing import Any, Optional, Union
8
+
9
+ import geopandas as gpd
10
+ import pandas as pd
11
+ import tomli
12
+ from fiat_toolbox import FiatColumns, get_fiat_columns
13
+ from fiat_toolbox.equity.equity import Equity
14
+ from fiat_toolbox.infographics.infographics_factory import InforgraphicFactory
15
+ from fiat_toolbox.metrics_writer.fiat_write_metrics_file import MetricsFileWriter
16
+ from fiat_toolbox.metrics_writer.fiat_write_return_period_threshold import (
17
+ ExceedanceProbabilityCalculator,
18
+ )
19
+ from fiat_toolbox.spatial_output.aggregation_areas import AggregationAreas
20
+ from fiat_toolbox.spatial_output.footprints import Footprints
21
+ from fiat_toolbox.utils import extract_variables, matches_pattern, replace_pattern
22
+ from hydromt_fiat.fiat import FiatModel
23
+
24
+ from flood_adapt.adapter.interface.impact_adapter import IImpactAdapter
25
+ from flood_adapt.config.fiat import FiatConfigModel
26
+ from flood_adapt.misc.log import FloodAdaptLogging
27
+ from flood_adapt.misc.path_builder import (
28
+ ObjectDir,
29
+ )
30
+ from flood_adapt.misc.utils import cd, resolve_filepath
31
+ from flood_adapt.objects.events.events import Mode
32
+ from flood_adapt.objects.forcing import unit_system as us
33
+ from flood_adapt.objects.measures.measures import (
34
+ Buyout,
35
+ Elevate,
36
+ FloodProof,
37
+ Measure,
38
+ MeasureType,
39
+ )
40
+ from flood_adapt.objects.projections.projections import Projection
41
+ from flood_adapt.objects.scenarios.scenarios import Scenario
42
+ from flood_adapt.workflows.floodmap import FloodMap, FloodmapType
43
+ from flood_adapt.workflows.impacts_integrator import Impacts
44
+
45
+ # Define naming structure for saved files
46
+ _IMPACT_COLUMNS = FiatColumns(
47
+ object_id="Object ID",
48
+ object_name="Object Name",
49
+ primary_object_type="Primary Object Type",
50
+ secondary_object_type="Secondary Object Type",
51
+ extraction_method="Extraction Method",
52
+ ground_floor_height="Ground Floor Height",
53
+ ground_elevation="Ground Elevation",
54
+ damage_function="Damage Function: {name}",
55
+ max_potential_damage="Max Potential Damage: {name}",
56
+ aggregation_label="Aggregation Label: {name}",
57
+ inundation_depth="Inundation Depth",
58
+ inundation_depth_rp="Inundation Depth ({years}Y)",
59
+ reduction_factor="Reduction Factor",
60
+ reduction_factor_rp="Reduction Factor ({years}Y)",
61
+ damage="Damage: {name}",
62
+ damage_rp="Damage: {name} ({years}Y)",
63
+ total_damage="Total Damage",
64
+ total_damage_rp="Total Damage ({years}Y)",
65
+ risk_ead="Risk (EAD)",
66
+ segment_length="Segment Length",
67
+ )
68
+
69
+ # Define column naming of FIAT model
70
+ _FIAT_COLUMNS: FiatColumns = get_fiat_columns(
71
+ fiat_version="0.2.1"
72
+ ) # columns of FIAT # TODO add version from config
73
+
74
+
75
class FiatAdapter(IImpactAdapter):
    """
    ImpactAdapter for Delft-FIAT.

    It includes:
    - preprocessing methods for adding measures, projections and hazards
    - executing method for running a Delft-FIAT simulation
    - postprocessing methods for saving impact results
    """

    # TODO deal with all the relative paths for the files used
    # TODO IImpactAdapter and general Adapter class should NOT use the database

    # Lazily-loaded hydromt-FIAT model; populated on first access of `model`.
    _model: Optional[FiatModel] = None
    # FIAT configuration (aggregations, bfe, infographics flags, ...); may be None.
    config: Optional[FiatConfigModel] = None
    # Path to the Delft-FIAT executable used by `execute()`.
    exe_path: Optional[os.PathLike] = None
    # Column naming scheme of the FIAT model files (set to _FIAT_COLUMNS in __init__).
    fiat_columns: FiatColumns
    # Column naming scheme of FloodAdapt impact outputs (set to _IMPACT_COLUMNS in __init__).
    impact_columns: FiatColumns
93
+
94
+ def __init__(
95
+ self,
96
+ model_root: Path,
97
+ config: Optional[FiatConfigModel] = None,
98
+ exe_path: Optional[os.PathLike] = None,
99
+ delete_crashed_runs: bool = True,
100
+ config_base_path: Optional[os.PathLike] = None,
101
+ ) -> None:
102
+ # TODO should exe_path and delete_crashed_runs be part of the config?
103
+ # Load FIAT template
104
+ self.logger = FloodAdaptLogging.getLogger("FiatAdapter")
105
+ self.config = config
106
+ self.config_base_path = config_base_path
107
+ self.exe_path = exe_path
108
+ self.delete_crashed_runs = delete_crashed_runs
109
+ self._model_root = str(model_root.resolve())
110
+ self.fiat_columns = _FIAT_COLUMNS
111
+ self.impact_columns = _IMPACT_COLUMNS # columns of FA impact output
112
+
113
+ @property
114
+ def model(self) -> FiatModel:
115
+ """Lazily load and cache the FiatModel."""
116
+ if self._model is None:
117
+ self._model = FiatModel(root=self._model_root, mode="r")
118
+ self._model.read()
119
+ return self._model
120
+
121
    @property
    def model_root(self):
        """Root directory of the (possibly lazily-loaded) FIAT model, as a Path."""
        return Path(self.model.root)
124
+
125
+ @property
126
+ def damage_types(self):
127
+ """Get the damage types that are present in the exposure."""
128
+ types = []
129
+ for col in self.model.exposure.exposure_db.columns:
130
+ if matches_pattern(col, self.fiat_columns.damage_function):
131
+ name = extract_variables(col, self.fiat_columns.damage_function)["name"]
132
+ types.append(name)
133
+ return types
134
+
135
+ def read(self, path: Path) -> None:
136
+ """Read the fiat model from the current model root."""
137
+ if Path(self.model.root).resolve() != Path(path).resolve():
138
+ self.model.set_root(root=str(path), mode="r")
139
+ self.model.read()
140
+
141
+ def write(self, path_out: Union[str, os.PathLike], overwrite: bool = True) -> None:
142
+ """Write the fiat model configuration to a directory."""
143
+ if not isinstance(path_out, Path):
144
+ path_out = Path(path_out).resolve()
145
+
146
+ if not path_out.exists():
147
+ path_out.mkdir(parents=True)
148
+
149
+ write_mode = "w+" if overwrite else "w"
150
+ with cd(path_out):
151
+ self.model.set_root(root=str(path_out), mode=write_mode)
152
+ self.model.write()
153
+
154
+ def close_files(self):
155
+ """Close all open files and clean up file handles."""
156
+ if hasattr(self.logger, "handlers"):
157
+ for handler in self.logger.handlers:
158
+ if isinstance(handler, logging.FileHandler):
159
+ handler.close()
160
+ self.logger.removeHandler(handler)
161
+
162
    def __enter__(self) -> "FiatAdapter":
        """Enter the context manager; the adapter itself is the context object."""
        return self
164
+
165
    def __exit__(self, exc_type, exc_value, traceback) -> bool:
        """Close open log-file handles on exit; returns False so exceptions propagate."""
        self.close_files()
        return False
168
+
169
+ def has_run(self, scenario: Scenario) -> bool:
170
+ # TODO this should include a check for all output files , and then maybe save them as output paths and types
171
+ """
172
+ Check if the impact results file for the given scenario exists.
173
+
174
+ Parameters
175
+ ----------
176
+ scenario : Scenario
177
+ The scenario for which to check the FIAT results.
178
+
179
+ Returns
180
+ -------
181
+ bool
182
+ True if the FIAT results file exists, False otherwise.
183
+ """
184
+ impacts_path = Impacts(scenario=scenario).impacts_path
185
+
186
+ fiat_results_path = impacts_path.joinpath(
187
+ f"Impacts_detailed_{scenario.name}.csv"
188
+ )
189
+ return fiat_results_path.exists()
190
+
191
+ def delete_model(self):
192
+ """
193
+ Delete the Delft-FIAT simulation folder.
194
+
195
+ This method attempts to delete the directory specified by `self.model_root`.
196
+
197
+ Raises
198
+ ------
199
+ OSError: If the directory cannot be deleted.
200
+ """
201
+ self.logger.info("Deleting Delft-FIAT simulation folder")
202
+ try:
203
+ shutil.rmtree(self.model_root)
204
+ except OSError as e_info:
205
+ self.logger.warning(f"{e_info}\nCould not delete {self.model_root}.")
206
+
207
+ def fiat_completed(self) -> bool:
208
+ """Check if fiat has run as expected.
209
+
210
+ Returns
211
+ -------
212
+ boolean
213
+ True if fiat has run, False if something went wrong
214
+ """
215
+ log_file = self.model_root.joinpath(
216
+ self.model.config["output"]["path"], "fiat.log"
217
+ )
218
+ if not log_file.exists():
219
+ return False
220
+ try:
221
+ with open(log_file, "r", encoding="cp1252") as f:
222
+ return "Geom calculation are done!" in f.read()
223
+ except Exception as e:
224
+ self.logger.error(f"Error while checking if FIAT has run: {e}")
225
+ return False
226
+
227
    def preprocess(self, scenario: Scenario) -> None:
        """
        Preprocess the FIAT-model given a scenario by setting up projections, measures, and hazards, and then saves any changes made to disk.

        Args:
            scenario (Scenario): The scenario to preprocess, which includes projection,
                strategy, and hazard.

        Returns
        -------
        None
        """
        self.logger.info("Pre-processing Delft-FIAT model")
        # Projection: apply socioeconomic changes first
        projection = self.database.projections.get(scenario.projection)
        self.add_projection(projection)

        # Measures: only the impact-side measures of the strategy are relevant here
        strategy = self.database.strategies.get(scenario.strategy)
        for measure in strategy.get_impact_measures():
            self.add_measure(measure)

        # Hazard: risk scenarios use the return-period maps, event scenarios the max water level
        floodmap = FloodMap(scenario.name)
        var = "risk_maps" if floodmap.mode == Mode.risk else "zsmax"
        is_risk = floodmap.mode == Mode.risk
        self.set_hazard(
            map_fn=floodmap.path,
            map_type=floodmap.type,
            var=var,
            is_risk=is_risk,
            units=us.UnitTypesLength.meters,
        )

        # Save any changes made to disk as well
        output_path = Impacts(scenario).impacts_path / "fiat_model"
        self.write(path_out=output_path)
264
+
265
+ def run(self, scenario) -> None:
266
+ """
267
+ Execute the full process for a given scenario, including preprocessing, executing the simulation, and postprocessing steps.
268
+
269
+ Args:
270
+ scenario: An object containing the scenario data.
271
+
272
+ Returns
273
+ -------
274
+ None
275
+ """
276
+ sim_path = Impacts(scenario=scenario).impacts_path / "fiat_model"
277
+
278
+ self.preprocess(scenario)
279
+ self.execute(sim_path)
280
+ self.postprocess(scenario)
281
+
282
    def execute(
        self,
        path: Optional[os.PathLike] = None,
        exe_path: Optional[os.PathLike] = None,
        delete_crashed_runs: Optional[bool] = None,
        strict: bool = True,
    ) -> bool:
        """
        Execute the FIAT model.

        Parameters
        ----------
        path : Optional[os.PathLike], optional
            The path to the model directory. If not provided, defaults to `self.model_root`.
        exe_path : Optional[os.PathLike], optional
            The path to the FIAT executable. If not provided, defaults to `self.exe_path`.
        delete_crashed_runs : Optional[bool], optional
            Whether to delete files from crashed runs. If not provided, defaults to `self.delete_crashed_runs`.
        strict : bool, optional
            Whether to raise an error if the FIAT model fails to run. Defaults to True.

        Returns
        -------
        bool
            True if the FIAT model run successfully, False otherwise.

        Raises
        ------
        ValueError
            If `exe_path` is not provided and `self.exe_path` is None.
        RuntimeError
            If the FIAT model fails to run and `strict` is True.
        """
        # Fall back to instance-level defaults for any argument not provided.
        if path is None:
            path = self.model_root
        if exe_path is None:
            if self.exe_path is None:
                raise ValueError(
                    "'exe_path' needs to be provided either when calling FiatAdapter.execute() or during initialization of the FiatAdapter object."
                )
            exe_path = self.exe_path
        if delete_crashed_runs is None:
            delete_crashed_runs = self.delete_crashed_runs
        path = Path(path)
        fiat_log = path / "fiat.log"
        # Run from inside the model dir so FIAT resolves "settings.toml" relative to it,
        # and capture all logging into the run's fiat.log.
        with cd(path):
            with FloodAdaptLogging.to_file(file_path=fiat_log):
                FiatAdapter._ensure_correct_hash_spacing_in_csv(path)

                self.logger.info(f"Running FIAT in {path}")
                process = subprocess.run(
                    args=[Path(exe_path).resolve().as_posix(), "run", "settings.toml"],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    text=True,
                )
                self.logger.debug(process.stdout)

                if process.returncode != 0:
                    if delete_crashed_runs:
                        # Remove all files in the simulation folder except for the log files
                        # (bottom-up walk so emptied subdirectories can be removed too)
                        for subdir, dirs, files in os.walk(path, topdown=False):
                            for file in files:
                                if not file.endswith(".log"):
                                    os.remove(os.path.join(subdir, file))

                            if not os.listdir(subdir):
                                os.rmdir(subdir)

                    if strict:
                        raise RuntimeError(f"FIAT model failed to run in {path}.")
                    else:
                        self.logger.error(f"FIAT model failed to run in {path}.")

        # On success, load the produced CSVs into self.outputs for postprocessing.
        if process.returncode == 0:
            self.read_outputs()

        return process.returncode == 0
360
+
361
+ def read_outputs(self) -> None:
362
+ """
363
+ Read the output FIAT CSV file specified in the model configuration and stores the data in the `outputs` attribute.
364
+
365
+ Attributes
366
+ ----------
367
+ outputs : dict
368
+ A dictionary containing the following keys:
369
+ - "path" : Path
370
+ The path to the output directory.
371
+ - "table" : DataFrame
372
+ The contents of the output CSV file.
373
+ """
374
+ # Get output path
375
+ outputs_path = self.model_root.joinpath(self.model.config["output"]["path"])
376
+
377
+ # Get all csvs and concatenate them in a single table
378
+ csv_outputs_df = []
379
+ for output_csv in self.model.config["output"]["csv"]:
380
+ csv_path = outputs_path.joinpath(
381
+ self.model.config["output"]["csv"][output_csv]
382
+ )
383
+ output_csv_df = pd.read_csv(csv_path)
384
+ csv_outputs_df.append(output_csv_df)
385
+ output_csv = pd.concat(csv_outputs_df)
386
+
387
+ # Store them
388
+ self.outputs = {}
389
+ self.outputs["path"] = outputs_path
390
+ self.outputs["table"] = output_csv
391
+
392
+ def _get_aggr_ind(self, aggr_label: str):
393
+ """
394
+ Retrieve the index of the aggregation configuration that matches the given label.
395
+
396
+ Parameters
397
+ ----------
398
+ aggr_label : str
399
+ The label of the aggregation to find.
400
+
401
+ Returns
402
+ -------
403
+ int
404
+ The index of the aggregation configuration that matches the given label.
405
+
406
+ Raises
407
+ ------
408
+ IndexError
409
+ If no aggregation with the given label is found.
410
+ """
411
+ ind = [
412
+ i
413
+ for i, aggr in enumerate(self.config.aggregation)
414
+ if aggr.name == aggr_label
415
+ ][0]
416
+
417
+ return ind
418
+
419
    def postprocess(self, scenario):
        """
        Post-process the results of the Delft-FIAT simulation for a given scenario.

        Parameters
        ----------
        scenario : Scenario
            The scenario object containing all relevant data and configurations.

        Raises
        ------
        RuntimeError
            If the Delft-FIAT simulation did not run successfully.

        Post-processing steps include:
        - Reading the outputs of the Delft-FIAT simulation.
        - Adding exceedance probabilities for risk mode scenarios.
        - Saving detailed impacts per object to a CSV file.
        - Creating infometrics files based on different metric configurations.
        - Generating infographic files if configured.
        - Calculating equity-based damages for risk mode scenarios.
        - Saving aggregated metrics to shapefiles.
        - Merging points data to building footprints.
        - Creating a roads spatial file if configured.
        - Deleting the simulation folder if the site configuration is set to not keep the simulation.

        Logging
        -------
        Logs the start and completion of the post-processing steps.
        """
        if not self.fiat_completed():
            raise RuntimeError("Delft-FIAT did not run successfully!")

        self.logger.info("Post-processing Delft-FIAT results")

        if not self.outputs:
            self.read_outputs()
        mode = self.database.events.get(scenario.event).mode

        # Define scenario output path
        impacts = Impacts(scenario=scenario)
        scenario_output_path = impacts.results_path
        impacts_output_path = impacts.impacts_path

        # Create column mapping to update column names
        # (translate raw FIAT column names into FloodAdapt impact column names)
        name_translation = {}
        for col in self.outputs["table"].columns:  # iterate through output columns
            for field in list(self.impact_columns.model_fields):  # check for each field
                fiat_col = getattr(self.fiat_columns, field)
                if matches_pattern(col, fiat_col):
                    impact_col = getattr(self.impact_columns, field)
                    new_col = replace_pattern(col, fiat_col, impact_col)
                    if (
                        ".0Y" in new_col
                    ):  # TODO for now quick fix to account for float RP years, while metrics have integers
                        new_col = new_col.replace(".0Y", "Y")
                    name_translation[col] = new_col  # save mapping
        self.name_mapping = name_translation

        # Rename save outputs
        self.outputs["table"] = self.outputs["table"].rename(columns=self.name_mapping)

        # Save impacts per object
        fiat_results_path = impacts_output_path.joinpath(
            f"Impacts_detailed_{scenario.name}.csv"
        )
        self.outputs["table"].to_csv(fiat_results_path, index=False)

        # Add exceedance probabilities if needed (only for risk)
        if mode == Mode.risk:
            # Get config path
            # TODO check where this configs should be read from
            config_path = self.database.static_path.joinpath(
                "templates", "infometrics", "metrics_additional_risk_configs.toml"
            )
            with open(config_path, mode="rb") as fp:
                config = tomli.load(fp)["flood_exceedance"]
            self.add_exceedance_probability(
                column=config[
                    "column"
                ],  # TODO check how to the correct version of column
                threshold=config["threshold"],
                period=config["period"],
            )

        # Create the infometrics files
        # (risk scenarios use the "*_risk" variants of the metric configs)
        if mode == Mode.risk:
            ext = "_risk"
        else:
            ext = ""

        # Get options for metric configurations
        metric_types = ["mandatory", "additional"]  # these are checked always

        if self.config.infographics:  # if infographics are created
            metric_types += ["infographic"]

        metric_config_paths = [
            self.database.static_path.joinpath(
                "templates", "infometrics", f"{name}_metrics_config{ext}.toml"
            )
            for name in metric_types
        ]

        # Specify the metrics output path
        metrics_outputs_path = scenario_output_path.joinpath(
            f"Infometrics_{scenario.name}.csv"
        )
        self.create_infometrics(metric_config_paths, metrics_outputs_path)

        # Get paths of created aggregated infometrics
        # (one "Infometrics_<scenario>_<aggr_label>.csv" per aggregation level)
        aggr_metrics_paths = list(
            metrics_outputs_path.parent.glob(f"{metrics_outputs_path.stem}_*.csv")
        )

        # Create the infographic files
        if self.config.infographics:
            config_base_path = self.database.static_path.joinpath(
                "templates", "Infographics"
            )
            self.create_infographics(
                name=scenario.name,
                output_base_path=scenario_output_path,
                config_base_path=config_base_path,
                metrics_path=metrics_outputs_path,
                mode=mode,
            )

        # Calculate equity based damages
        if mode == Mode.risk:
            for file in aggr_metrics_paths:
                # Load metrics
                aggr_label = file.stem.split(f"{metrics_outputs_path.stem}_")[-1]
                self.add_equity(aggr_label=aggr_label, metrics_path=file)

        # Save aggregated metrics to shapefiles
        for file in aggr_metrics_paths:
            aggr_label = file.stem.split(f"{metrics_outputs_path.stem}_")[-1]
            output_path = impacts_output_path.joinpath(
                f"Impacts_aggregated_{scenario.name}_{aggr_label}.gpkg"
            )
            self.save_aggregation_spatial(
                aggr_label=aggr_label, metrics_path=file, output_path=output_path
            )

        # Merge points data to building footprints
        self.save_building_footprints(
            output_path=impacts_output_path.joinpath(
                f"Impacts_building_footprints_{scenario.name}.gpkg"
            )
        )

        # Create a roads spatial file
        if self.config.roads_file_name:
            self.save_roads(
                output_path=impacts_output_path.joinpath(
                    f"Impacts_roads_{scenario.name}.gpkg"
                )
            )

        self.logger.info("Delft-FIAT post-processing complete!")

        # If site config is set to not keep FIAT simulation, delete folder
        if not self.config.save_simulation:
            self.delete_model()
584
+
585
+ def add_measure(self, measure: Measure):
586
+ """
587
+ Add and apply a specific impact measure to the properties of the FIAT model.
588
+
589
+ Parameters
590
+ ----------
591
+ measure : Measure
592
+ The impact measure to be applied. It can be of type Elevate, FloodProof, or Buyout.
593
+
594
+ Notes
595
+ -----
596
+ The method logs the application of the measure and calls the appropriate method based on the measure type:
597
+ - Elevate: Calls elevate_properties(measure)
598
+ - FloodProof: Calls floodproof_properties(measure)
599
+ - Buyout: Calls buyout_properties(measure)
600
+
601
+ If the measure type is unsupported, a warning is logged.
602
+ """
603
+ self.logger.info(f"Applying impact measure '{measure.name}'")
604
+ if isinstance(measure, Elevate):
605
+ self.elevate_properties(measure)
606
+ elif isinstance(measure, FloodProof):
607
+ self.floodproof_properties(measure)
608
+ elif isinstance(measure, Buyout):
609
+ self.buyout_properties(measure)
610
+ else:
611
+ self.logger.warning(
612
+ f"Skipping unsupported measure type {measure.__class__.__name__}"
613
+ )
614
+
615
    def add_projection(self, projection: Projection):
        """
        Add the socioeconomic changes part of a projection to the FIAT model.

        Parameters
        ----------
        projection : Projection
            The projection object containing socioeconomic changes to be applied.

        Notes
        -----
        - Economic growth is applied to all existing buildings if specified.
        - New population growth areas are added if specified, taking into account
          economic growth.
        - Population growth is applied to existing objects if specified.
        """
        self.logger.info(
            f"Applying socioeconomic changes from projection '{projection.name}'"
        )
        socio_economic_change = projection.socio_economic_change

        ids_all_buildings = self.get_all_building_ids()

        # Implement socioeconomic changes if needed
        # First apply economic growth to existing objects
        # (math.isclose guards against applying a no-op 0% growth)
        if not math.isclose(socio_economic_change.economic_growth, 0, abs_tol=1e-6):
            self.apply_economic_growth(
                economic_growth=socio_economic_change.economic_growth,
                ids=ids_all_buildings,  #
            )

        # Then the new population growth area is added if provided
        # In the new areas, the economic growth is taken into account!
        # Order matters since for the pop growth new, we only want the economic growth!
        if not math.isclose(
            socio_economic_change.population_growth_new, 0, abs_tol=1e-6
        ):
            # Get path of new development area geometry
            area_path = resolve_filepath(
                object_dir=ObjectDir.projection,
                obj_name=projection.name,
                path=socio_economic_change.new_development_shapefile,
            )

            # Get DEM location for assigning elevation to new areas
            dem = (
                self.database.static_path
                / "dem"
                / self.database.site.sfincs.dem.filename
            )
            # Call adapter method to add the new areas
            self.apply_population_growth_new(
                population_growth=socio_economic_change.population_growth_new,
                ground_floor_height=socio_economic_change.new_development_elevation.value,
                elevation_type=socio_economic_change.new_development_elevation.type,
                area_path=area_path,
                ground_elevation=dem,
            )

        # Then apply population growth to existing objects
        if not math.isclose(
            socio_economic_change.population_growth_existing, 0, abs_tol=1e-6
        ):
            self.apply_population_growth_existing(
                population_growth=socio_economic_change.population_growth_existing,
                ids=ids_all_buildings,
            )
682
+
683
    def set_hazard(
        self,
        map_fn: Union[os.PathLike, list[os.PathLike]],
        map_type: FloodmapType,
        var: str,
        is_risk: bool = False,
        units: str = us.UnitTypesLength.meters,
    ) -> None:
        """
        Set the hazard map and type for the FIAT model.

        Parameters
        ----------
        map_fn : str
            The filename of the hazard map (or a list of filenames for risk maps).
        map_type : FloodmapType
            The type of the flood map.
        var : str
            The variable name in the hazard map.
        is_risk : bool, optional
            Flag indicating if the map is a risk output. Defaults to False.
        units : str, optional
            The units of the hazard map. Defaults to us.UnitTypesLength.meters.
        """
        self.logger.info(f"Setting hazard to the {map_type} map {map_fn}")
        # Add the floodmap data to a data catalog with the unit conversion
        # (factor converting the hazard map's units into the exposure's length unit)
        wl_current_units = us.UnitfulLength(value=1.0, units=units)
        conversion_factor = wl_current_units.convert(self.model.exposure.unit)

        self.model.setup_hazard(
            map_fn=map_fn,
            map_type=map_type,
            rp=None,
            crs=None,  # change this in new version (maybe to str(floodmap.crs.split(':')[1]))
            nodata=-999,  # change this in new version
            var=var,
            chunks="auto",
            risk_output=is_risk,
            unit_conversion_factor=conversion_factor,
        )
723
+
724
+ # PROJECTIONS
725
+
726
+ def apply_economic_growth(
727
+ self, economic_growth: float, ids: Optional[list] = None
728
+ ) -> None:
729
+ """
730
+ Apply economic growth to the FIAT-Model by adjusting the maximum potential damage values in the exposure database.
731
+
732
+ This method updates the max potential damage values in the exposure database by
733
+ applying a given economic growth rate. It can optionally filter the updates to
734
+ specific objects identified by their IDs.
735
+
736
+ Parameters
737
+ ----------
738
+ economic_growth : float
739
+ The economic growth rate to apply, expressed as a percentage.
740
+ ids : Optional[list], default=None
741
+ A list of object IDs to which the economic growth should be applied. If None, the growth is applied to all buildings.
742
+ """
743
+ self.logger.info(f"Applying economic growth of {economic_growth} %.")
744
+ # Get columns that include max damage
745
+ damage_cols = [
746
+ c
747
+ for c in self.model.exposure.exposure_db.columns
748
+ if matches_pattern(c, self.fiat_columns.max_potential_damage)
749
+ ]
750
+
751
+ # Get objects that are buildings (using site info)
752
+ buildings_rows = ~self.model.exposure.exposure_db[
753
+ self.fiat_columns.primary_object_type
754
+ ].isin(self.config.non_building_names)
755
+
756
+ # If ids are given use that as an additional filter
757
+ if ids:
758
+ buildings_rows = buildings_rows & self.model.exposure.exposure_db[
759
+ self.fiat_columns.object_id
760
+ ].isin(ids)
761
+
762
+ # Update columns using economic growth value
763
+ updated_max_pot_damage = self.model.exposure.exposure_db.copy()
764
+ updated_max_pot_damage.loc[buildings_rows, damage_cols] *= (
765
+ 1.0 + economic_growth / 100.0
766
+ )
767
+
768
+ # update fiat model
769
+ self.model.exposure.update_max_potential_damage(
770
+ updated_max_potential_damages=updated_max_pot_damage
771
+ )
772
+
773
+ def apply_population_growth_existing(
774
+ self, population_growth: float, ids: Optional[list[str]] = None
775
+ ) -> None:
776
+ """
777
+ Apply population growth to the FIAT-Model by adjusting the existing max potential damage values for buildings.
778
+
779
+ This method updates the max potential damage values in the exposure database by
780
+ applying a given population growth rate. It can optionally filter the updates to
781
+ specific objects identified by their IDs.
782
+
783
+ Parameters
784
+ ----------
785
+ population_growth : float
786
+ The population growth rate as a percentage.
787
+ ids : Optional[list[str]]
788
+ A list of object IDs to filter the updates. If None, the updates are applied to all buildings.
789
+ """
790
+ self.logger.info(f"Applying population growth of {population_growth} %.")
791
+ # Get columns that include max damage
792
+ damage_cols = [
793
+ c
794
+ for c in self.model.exposure.exposure_db.columns
795
+ if matches_pattern(c, self.fiat_columns.max_potential_damage)
796
+ ]
797
+
798
+ # Get objects that are buildings (using site info)
799
+ buildings_rows = ~self.model.exposure.exposure_db[
800
+ self.fiat_columns.primary_object_type
801
+ ].isin(self.config.non_building_names)
802
+
803
+ # If ids are given use that as an additional filter
804
+ if ids:
805
+ buildings_rows = buildings_rows & self.model.exposure.exposure_db[
806
+ self.fiat_columns.object_id
807
+ ].isin(ids)
808
+
809
+ # Update columns using economic growth value
810
+ updated_max_pot_damage = self.model.exposure.exposure_db.copy()
811
+ updated_max_pot_damage.loc[buildings_rows, damage_cols] *= (
812
+ 1.0 + population_growth / 100.0
813
+ )
814
+
815
+ # update fiat model
816
+ self.model.exposure.update_max_potential_damage(
817
+ updated_max_potential_damages=updated_max_pot_damage
818
+ )
819
+
820
    def apply_population_growth_new(
        self,
        population_growth: float,
        ground_floor_height: float,
        elevation_type: str,
        area_path: str,
        ground_elevation: Union[None, str, Path] = None,
    ) -> None:
        """
        Apply population growth in a new area by adding new objects in the model.

        Parameters
        ----------
        population_growth : float
            The percentage of population growth to apply.
        ground_floor_height : float
            The height of the ground floor.
        elevation_type : str
            The type of elevation reference to use. Must be either 'floodmap' or 'datum'.
        area_path : str
            The path to the area file.
        ground_elevation : Union[None, str, Path], optional
            The ground elevation reference. Default is None.

        Raises
        ------
        ValueError
            If `elevation_type` is 'floodmap' and base flood elevation (bfe) map is not provided.
            If `elevation_type` is not 'floodmap' or 'datum'.
        """
        self.logger.info(
            f"Applying population growth of {population_growth} %, by creating a new development area using the geometries from {area_path} and a ground floor height of {ground_floor_height} {self.model.exposure.unit} above '{elevation_type}'."
        )
        # Get reference type to align with hydromt
        if elevation_type == "floodmap":
            if not self.config.bfe:
                raise ValueError(
                    "Base flood elevation (bfe) map is required to use 'floodmap' as reference."
                )
            kwargs = {
                "elevation_reference": "geom",
                "path_ref": self.database.static_path.joinpath(self.config.bfe.geom),
                "attr_ref": self.config.bfe.field_name,
            }
        elif elevation_type == "datum":
            kwargs = {"elevation_reference": "datum"}
        else:
            raise ValueError("elevation type can only be one of 'floodmap' or 'datum'")
        # Get aggregation areas info
        # (files, matching attribute fields, and the FIAT label column names)
        aggregation_areas = [
            self.database.static_path.joinpath(aggr.file)
            for aggr in self.config.aggregation
        ]
        attribute_names = [aggr.field_name for aggr in self.config.aggregation]
        label_names = [
            self.fiat_columns.aggregation_label.format(name=aggr.name)
            for aggr in self.config.aggregation
        ]
        new_dev_geom_name = Path(self.config.new_development_file_name).stem
        # Use hydromt function
        self.model.exposure.setup_new_composite_areas(
            percent_growth=population_growth,
            geom_file=Path(area_path),
            ground_floor_height=ground_floor_height,
            damage_types=self.damage_types,
            vulnerability=self.model.vulnerability,
            ground_elevation=ground_elevation,
            aggregation_area_fn=aggregation_areas,
            attribute_names=attribute_names,
            label_names=label_names,
            geom_name=new_dev_geom_name,
            **kwargs,
        )
893
+
894
+ # MEASURES
895
+ @staticmethod
896
+ def _get_area_name(measure: Measure):
897
+ """
898
+ Determine the area name based on the selection type of the measure.
899
+
900
+ Parameters
901
+ ----------
902
+ measure : Measure
903
+ An instance of Measure containing attributes that define the selection type and area.
904
+
905
+ Returns
906
+ -------
907
+ str
908
+ The name of the area. It returns the aggregation area name if the selection type is "aggregation_area",
909
+ the polygon file name if the selection type is "polygon", and "all" for any other selection type.
910
+ """
911
+ if measure.selection_type == "aggregation_area":
912
+ area = measure.aggregation_area_name
913
+ elif measure.selection_type == "polygon":
914
+ area = measure.polygon_file
915
+ else:
916
+ area = "all"
917
+ return area
918
+
919
    def elevate_properties(self, elevate: Elevate) -> None:
        """
        Elevate the ground floor height of properties based on the provided Elevate measure.

        Parameters
        ----------
        elevate : Elevate
            The Elevate measure containing the elevation details.

        Raises
        ------
        ValueError
            If the elevation type is 'floodmap' and the base flood elevation (bfe) map is not provided.
            If the elevation type is not 'floodmap' or 'datum'.
        """
        area = self._get_area_name(elevate)
        self.logger.info(
            f"Elevating '{elevate.property_type}' type properties in '{area}' by {elevate.elevation} relative to '{elevate.elevation.type}'."
        )
        # If ids are given use that as an additional filter
        objectids = self.get_object_ids(elevate)

        # Get reference type to align with hydromt
        if elevate.elevation.type == "floodmap":
            if not self.config.bfe:
                raise ValueError(
                    "Base flood elevation (bfe) map is required to use 'floodmap' as reference."
                )
            # Prefer the bfe table over the bfe geometry when both are configured.
            # NOTE(review): bfe paths here are resolved against self.config_base_path,
            # while apply_population_growth_new resolves them against
            # self.database.static_path — confirm both bases point to the same data.
            if self.config.bfe.table:
                path_ref = self.config_base_path.joinpath(self.config.bfe.table)
                height_reference = "table"
            else:
                path_ref = self.config_base_path.joinpath(self.config.bfe.geom)
                height_reference = "geom"
            # Use hydromt function
            self.model.exposure.raise_ground_floor_height(
                raise_by=elevate.elevation.value,
                objectids=objectids,
                height_reference=height_reference,
                path_ref=path_ref,
                attr_ref=self.config.bfe.field_name,
            )

        elif elevate.elevation.type == "datum":
            # Use hydromt function
            self.model.exposure.raise_ground_floor_height(
                raise_by=elevate.elevation.value,
                objectids=objectids,
                height_reference="datum",
            )
        else:
            raise ValueError("elevation type can only be one of 'floodmap' or 'datum'")
971
+
972
+ def buyout_properties(self, buyout: Buyout) -> None:
973
+ """
974
+ Apply the buyout measure to the properties by setting their maximum potential damage to zero.
975
+
976
+ Parameters
977
+ ----------
978
+ buyout : Buyout
979
+ The Buyout measure containing the details of the properties to be bought out.
980
+
981
+ """
982
+ area = self._get_area_name(buyout)
983
+ self.logger.info(
984
+ f"Buying-out '{buyout.property_type}' type properties in '{area}'."
985
+ )
986
+ # Get columns that include max damage
987
+ damage_cols = [
988
+ c
989
+ for c in self.model.exposure.exposure_db.columns
990
+ if matches_pattern(c, self.fiat_columns.max_potential_damage)
991
+ ]
992
+
993
+ # Get objects that are buildings (using site info)
994
+ buildings_rows = ~self.model.exposure.exposure_db[
995
+ self.fiat_columns.primary_object_type
996
+ ].isin(self.config.non_building_names)
997
+
998
+ # Get rows that are affected
999
+ objectids = self.get_object_ids(buyout)
1000
+ rows = (
1001
+ self.model.exposure.exposure_db[self.fiat_columns.object_id].isin(objectids)
1002
+ & buildings_rows
1003
+ )
1004
+
1005
+ # Update columns
1006
+ updated_max_pot_damage = self.model.exposure.exposure_db.copy()
1007
+ updated_max_pot_damage.loc[rows, damage_cols] *= 0
1008
+
1009
+ # update fiat model
1010
+ self.model.exposure.update_max_potential_damage(
1011
+ updated_max_potential_damages=updated_max_pot_damage
1012
+ )
1013
+
1014
+ def floodproof_properties(self, floodproof: FloodProof) -> None:
1015
+ """
1016
+ Apply floodproofing measures to the properties by truncating the damage function.
1017
+
1018
+ Parameters
1019
+ ----------
1020
+ floodproof : FloodProof
1021
+ The FloodProof measure containing the details of the properties to be floodproofed.
1022
+ """
1023
+ area = self._get_area_name(floodproof)
1024
+ self.logger.info(
1025
+ f"Flood-proofing '{floodproof.property_type}' type properties in '{area}' by {floodproof.elevation}."
1026
+ )
1027
+ # If ids are given use that as an additional filter
1028
+ objectids = self.get_object_ids(floodproof)
1029
+
1030
+ # Use hydromt function
1031
+ self.model.exposure.truncate_damage_function(
1032
+ objectids=objectids,
1033
+ floodproof_to=floodproof.elevation.value,
1034
+ damage_function_types=self.damage_types,
1035
+ vulnerability=self.model.vulnerability,
1036
+ )
1037
+
1038
+ # STATIC METHODS
1039
+
1040
+ def get_buildings(self) -> gpd.GeoDataFrame:
1041
+ """
1042
+ Retrieve the building geometries from the FIAT model's exposure database.
1043
+
1044
+ Returns
1045
+ -------
1046
+ gpd.GeoDataFrame
1047
+ A GeoDataFrame containing the geometries of all buildings in the FIAT model.
1048
+
1049
+ Raises
1050
+ ------
1051
+ ValueError
1052
+ If the FIAT model does not have an exposure database initialized.
1053
+ """
1054
+ if self.model.exposure is None:
1055
+ raise ValueError(
1056
+ "FIAT model does not have exposure, make sure your model has been initialized."
1057
+ )
1058
+ gdf_0 = self.model.exposure.select_objects(
1059
+ primary_object_type="ALL",
1060
+ non_building_names=self.config.non_building_names,
1061
+ return_gdf=True,
1062
+ )
1063
+ # Rename columns
1064
+ name_translation = {}
1065
+ for col in gdf_0.columns: # iterate through output columns
1066
+ for field in list(self.impact_columns.model_fields): # check for each field
1067
+ fiat_col = getattr(self.fiat_columns, field)
1068
+ if matches_pattern(col, fiat_col):
1069
+ impact_col = getattr(self.impact_columns, field)
1070
+ new_col = replace_pattern(col, fiat_col, impact_col)
1071
+ name_translation[col] = new_col # save mapping
1072
+ gdf = gdf_0.rename(columns=name_translation)
1073
+ return gdf
1074
+
1075
+ def get_property_types(self) -> list:
1076
+ """
1077
+ Retrieve the list of property types from the FIAT model's exposure database.
1078
+
1079
+ Returns
1080
+ -------
1081
+ list
1082
+ A list of property types available in the FIAT model.
1083
+
1084
+ Raises
1085
+ ------
1086
+ ValueError
1087
+ If no property types are found in the FIAT model.
1088
+ """
1089
+ types = self.model.exposure.get_primary_object_type()
1090
+ if types is None:
1091
+ raise ValueError("No property types found in the FIAT model.")
1092
+ types.append("all") # Add "all" type for using as identifier
1093
+
1094
+ names = self.config.non_building_names
1095
+ if names:
1096
+ for name in names:
1097
+ if name in types:
1098
+ types.remove(name)
1099
+
1100
+ return types
1101
+
1102
+ def get_all_building_ids(self):
1103
+ """
1104
+ Retrieve the IDs of all buildings in the FIAT model.
1105
+
1106
+ Returns
1107
+ -------
1108
+ list
1109
+ A list of IDs for all buildings in the FIAT model.
1110
+ """
1111
+ # Get ids of existing buildings
1112
+ ids = self.model.exposure.get_object_ids(
1113
+ "all", non_building_names=self.config.non_building_names
1114
+ )
1115
+ return ids
1116
+
1117
+ def get_object_ids(self, measure: Measure) -> list[Any]:
1118
+ """
1119
+ Retrieve the object IDs for a given impact measure.
1120
+
1121
+ Parameters
1122
+ ----------
1123
+ measure : Measure
1124
+ The impact measure for which to retrieve object IDs.
1125
+
1126
+ Returns
1127
+ -------
1128
+ list[Any]
1129
+ A list of object IDs that match the criteria of the given measure.
1130
+
1131
+ Raises
1132
+ ------
1133
+ ValueError
1134
+ If the measure type is not an impact measure.
1135
+ """
1136
+ if not MeasureType.is_impact(measure.type):
1137
+ raise ValueError(
1138
+ f"Measure type {measure.type} is not an impact measure. "
1139
+ "Can only retrieve object ids for impact measures."
1140
+ )
1141
+
1142
+ # check if polygon file is used, then get the absolute path
1143
+ if measure.polygon_file:
1144
+ polygon_file = resolve_filepath(
1145
+ object_dir=ObjectDir.measure,
1146
+ obj_name=measure.name,
1147
+ path=measure.polygon_file,
1148
+ )
1149
+ else:
1150
+ polygon_file = None
1151
+
1152
+ # use the hydromt-fiat method to the ids
1153
+ ids = self.model.exposure.get_object_ids(
1154
+ selection_type=measure.selection_type,
1155
+ property_type=measure.property_type,
1156
+ non_building_names=self.config.non_building_names,
1157
+ aggregation=measure.aggregation_area_type,
1158
+ aggregation_area_name=measure.aggregation_area_name,
1159
+ polygon_file=str(polygon_file),
1160
+ )
1161
+
1162
+ return ids
1163
+
1164
+ # POST-PROCESSING METHODS
1165
+
1166
+ def add_exceedance_probability(
1167
+ self, column: str, threshold: float, period: int
1168
+ ) -> pd.DataFrame:
1169
+ """Calculate exceedance probabilities and append them to the results table.
1170
+
1171
+ Parameters
1172
+ ----------
1173
+ column : str
1174
+ The name of the column to calculate exceedance probabilities for.
1175
+ threshold : float
1176
+ The threshold value for exceedance probability calculation.
1177
+ period : int
1178
+ The return period for exceedance probability calculation.
1179
+
1180
+ Returns
1181
+ -------
1182
+ pd.DataFrame
1183
+ The updated results table with exceedance probabilities appended.
1184
+ """
1185
+ self.logger.info("Calculating exceedance probabilities")
1186
+ fiat_results_df = ExceedanceProbabilityCalculator(column).append_probability(
1187
+ self.outputs["table"], threshold, period
1188
+ )
1189
+ self.outputs["table"] = fiat_results_df
1190
+ return self.outputs["table"]
1191
+
1192
+ def create_infometrics(
1193
+ self, metric_config_paths: list[os.PathLike], metrics_output_path: os.PathLike
1194
+ ) -> None:
1195
+ """
1196
+ Create infometrics files based on the provided metric configuration paths.
1197
+
1198
+ Parameters
1199
+ ----------
1200
+ metric_config_paths : list[os.PathLike]
1201
+ A list of paths to the metric configuration files.
1202
+ metrics_output_path : os.PathLike
1203
+ The path where the metrics output file will be saved.
1204
+
1205
+ Raises
1206
+ ------
1207
+ FileNotFoundError
1208
+ If a mandatory metric configuration file does not exist.
1209
+ """
1210
+ # Get the metrics configuration
1211
+ self.logger.info("Calculating infometrics")
1212
+
1213
+ # Write the metrics to file
1214
+ # Check if type of metric configuration is available
1215
+ for metric_file in metric_config_paths:
1216
+ if metric_file.exists():
1217
+ metrics_writer = MetricsFileWriter(
1218
+ metric_file,
1219
+ aggregation_label_fmt=self.impact_columns.aggregation_label,
1220
+ )
1221
+
1222
+ metrics_writer.parse_metrics_to_file(
1223
+ df_results=self.outputs["table"],
1224
+ metrics_path=metrics_output_path,
1225
+ write_aggregate=None,
1226
+ )
1227
+
1228
+ metrics_writer.parse_metrics_to_file(
1229
+ df_results=self.outputs["table"],
1230
+ metrics_path=metrics_output_path,
1231
+ write_aggregate="all",
1232
+ )
1233
+ else:
1234
+ if "mandatory" in metric_file.name.lower():
1235
+ raise FileNotFoundError(
1236
+ f"Mandatory metric configuration file {metric_file} does not exist!"
1237
+ )
1238
+
1239
+ def create_infographics(
1240
+ self,
1241
+ name: str,
1242
+ output_base_path: os.PathLike,
1243
+ config_base_path: os.PathLike,
1244
+ metrics_path: os.PathLike,
1245
+ mode: Mode = Mode.single_event,
1246
+ ):
1247
+ """Create infographic files based on the provided metrics and configuration.
1248
+
1249
+ Parameters
1250
+ ----------
1251
+ name : str
1252
+ The name of the scenario.
1253
+ output_base_path : os.PathLike
1254
+ The base path where the output files will be saved.
1255
+ config_base_path : os.PathLike
1256
+ The base path where the configuration files are located.
1257
+ metrics_path : os.PathLike
1258
+ The path to the metrics file.
1259
+ mode : Mode, optional
1260
+ The mode of the infographic, by default Mode.single_event.
1261
+ """
1262
+ self.logger.info("Creating infographics")
1263
+
1264
+ # Check if infographics config file exists
1265
+ if mode == Mode.risk:
1266
+ config_path = config_base_path.joinpath("config_risk_charts.toml")
1267
+ if not config_path.exists():
1268
+ self.logger.warning(
1269
+ "Risk infographic cannot be created, since 'config_risk_charts.toml' is not available"
1270
+ )
1271
+ return
1272
+
1273
+ # Get the infographic
1274
+ InforgraphicFactory.create_infographic_file_writer(
1275
+ infographic_mode=mode,
1276
+ scenario_name=name,
1277
+ metrics_full_path=metrics_path,
1278
+ config_base_path=config_base_path,
1279
+ output_base_path=output_base_path,
1280
+ ).write_infographics_to_file()
1281
+
1282
+ def add_equity(
1283
+ self,
1284
+ aggr_label: str,
1285
+ metrics_path: os.PathLike,
1286
+ damage_column_pattern: str = "TotalDamageRP{rp}",
1287
+ gamma: float = 1.2,
1288
+ ):
1289
+ """Calculate equity-based damages for a given aggregation label.
1290
+
1291
+ Parameters
1292
+ ----------
1293
+ aggr_label : str
1294
+ The label of the aggregation area.
1295
+ metrics_path : os.PathLike
1296
+ The path to the metrics file.
1297
+ damage_column_pattern : str, optional
1298
+ The pattern for the damage column names, by default "TotalDamageRP{rp}".
1299
+ gamma : float, optional
1300
+ The equity weight parameter, by default 1.2
1301
+ """
1302
+ # TODO gamma in configuration file?
1303
+
1304
+ ind = self._get_aggr_ind(aggr_label)
1305
+ # TODO check what happens if aggr_label not in config
1306
+
1307
+ if self.config.aggregation[ind].equity is None:
1308
+ self.logger.warning(
1309
+ f"Cannot calculate equity weighted risk for aggregation label: {aggr_label}, because equity inputs are not available."
1310
+ )
1311
+ return
1312
+
1313
+ self.logger.info(
1314
+ f"Calculating equity weighted risk for aggregation label: {aggr_label} "
1315
+ )
1316
+ metrics = pd.read_csv(metrics_path)
1317
+ # Create Equity object
1318
+ equity = Equity(
1319
+ census_table=self.config_base_path.joinpath(
1320
+ self.config.aggregation[ind].equity.census_data
1321
+ ),
1322
+ damages_table=metrics,
1323
+ aggregation_label=self.config.aggregation[ind].field_name,
1324
+ percapitaincome_label=self.config.aggregation[
1325
+ ind
1326
+ ].equity.percapitaincome_label,
1327
+ totalpopulation_label=self.config.aggregation[
1328
+ ind
1329
+ ].equity.totalpopulation_label,
1330
+ damage_column_pattern=damage_column_pattern,
1331
+ )
1332
+ # Calculate equity
1333
+ df_equity = equity.equity_calculation(gamma)
1334
+ # Merge with metrics tables and resave
1335
+ metrics_new = metrics.merge(
1336
+ df_equity,
1337
+ left_on=metrics.columns[0],
1338
+ right_on=self.config.aggregation[ind].field_name,
1339
+ how="left",
1340
+ )
1341
+ del metrics_new[self.config.aggregation[ind].field_name]
1342
+ metrics_new = metrics_new.set_index(metrics_new.columns[0])
1343
+ metrics_new.loc["Description", ["EW", "EWEAD", "EWCEAD"]] = [
1344
+ "Equity weight",
1345
+ "Equity weighted expected annual damage",
1346
+ "Equity weighted certainty equivalent annual damage",
1347
+ ]
1348
+ metrics_new.loc["Show In Metrics Table", ["EW", "EWEAD", "EWCEAD"]] = [
1349
+ True,
1350
+ True,
1351
+ True,
1352
+ ]
1353
+ metrics_new.loc["Long Name", ["EW", "EWEAD", "EWCEAD"]] = [
1354
+ "Equity weight",
1355
+ "Equity weighted expected annual damage",
1356
+ "Equity weighted certainty equivalent annual damage",
1357
+ ]
1358
+ metrics_new.index.name = None
1359
+ metrics_new.to_csv(metrics_path)
1360
+
1361
+ def save_aggregation_spatial(
1362
+ self, aggr_label: str, metrics_path: os.PathLike, output_path: os.PathLike
1363
+ ):
1364
+ """
1365
+ Save aggregated metrics to a spatial file.
1366
+
1367
+ Parameters
1368
+ ----------
1369
+ aggr_label : str
1370
+ The label of the aggregation area.
1371
+ metrics_path : os.PathLike
1372
+ The path to the metrics file.
1373
+ output_path : os.PathLike
1374
+ The path where the output spatial file will be saved.
1375
+ """
1376
+ self.logger.info(f"Saving impacts for aggregation areas type: '{aggr_label}'")
1377
+
1378
+ metrics = pd.read_csv(metrics_path)
1379
+
1380
+ # Load aggregation areas
1381
+ ind = self._get_aggr_ind(aggr_label)
1382
+
1383
+ aggr_areas_path = self.config_base_path.joinpath(
1384
+ self.config.aggregation[ind].file
1385
+ )
1386
+
1387
+ aggr_areas = gpd.read_file(aggr_areas_path, engine="pyogrio")
1388
+
1389
+ # Save file
1390
+ AggregationAreas.write_spatial_file(
1391
+ metrics,
1392
+ aggr_areas,
1393
+ output_path,
1394
+ id_name=self.config.aggregation[ind].field_name,
1395
+ file_format="geopackage",
1396
+ )
1397
+
1398
    def save_building_footprints(self, output_path: os.PathLike):
        """
        Aggregate impacts at a building footprint level and then saves to an output file.

        Parameters
        ----------
        output_path : os.PathLike
            The path where the output spatial file will be saved.

        Raises
        ------
        ValueError
            If no building footprints are provided in the configuration.
        """
        self.logger.info("Calculating impacts at a building footprint scale")

        # Read the existing building points
        buildings = self.model.exposure.select_objects(
            primary_object_type="ALL",
            non_building_names=self.config.non_building_names,
            return_gdf=True,
        )

        # Change names from the FIAT column convention to the impact convention
        buildings = buildings[[self.fiat_columns.object_id, "geometry"]]
        buildings = buildings.rename(
            columns={self.fiat_columns.object_id: self.impact_columns.object_id}
        )

        # Get all results per building; the inner join keeps only objects present
        # in both the results table and the exposure geometries
        fiat_results_df = gpd.GeoDataFrame(
            self.outputs["table"].merge(
                buildings,
                on=self.impact_columns.object_id,
                how="inner",
            )
        )

        # Check which footprint case we have
        # If FIAT has points and external footprints are provided
        if self.config.building_footprints:
            method = "external_footprints"
            # Get footprints file
            footprints_path = self.config_base_path.joinpath(
                self.config.building_footprints
            )
            # Read building footprints
            footprints_gdf = gpd.read_file(footprints_path, engine="pyogrio")
            # NOTE(review): "BF_FID" is presumably the id field linking exposure
            # points to the external footprint file — confirm against the model setup.
            field_name = "BF_FID"
        # If FIAT has footprints already
        elif all(buildings.geometry.geom_type.isin(["Polygon", "MultiPolygon"])):
            method = "internal_footprints"
            footprints_gdf = buildings[[self.impact_columns.object_id, "geometry"]]
            field_name = self.impact_columns.object_id
        # If FIAT has points and no external footprints are available
        else:
            method = "no_footprints"

        # Based on case follow different workflow
        if method in ["external_footprints", "internal_footprints"]:
            footprints = Footprints(
                footprints=footprints_gdf,
                fiat_columns=self.impact_columns,
                field_name=field_name,
            )
            footprints.aggregate(fiat_results_df)
        elif method == "no_footprints":
            # No footprint geometries available: keep the point results as-is
            footprints = Footprints(fiat_columns=self.impact_columns)
            footprints.set_point_data(fiat_results_df)

        # Normalize damages
        footprints.calc_normalized_damages()

        # Save footprint
        footprints.write(output_path)
1473
+
1474
+ def save_roads(self, output_path: os.PathLike):
1475
+ """
1476
+ Save the impacts on roads to a spatial file.
1477
+
1478
+ Parameters
1479
+ ----------
1480
+ output_path : os.PathLike
1481
+ The path where the output spatial file will be saved.
1482
+ """
1483
+ self.logger.info("Calculating road impacts")
1484
+ # Read roads spatial file
1485
+ roads = gpd.read_file(
1486
+ self.outputs["path"].joinpath(self.config.roads_file_name)
1487
+ )
1488
+ roads = roads.rename(columns=self.name_mapping)
1489
+ # Get columns to use
1490
+ aggr_cols = [
1491
+ name
1492
+ for name in self.outputs["table"].columns
1493
+ if self.impact_columns.aggregation_label in name
1494
+ ]
1495
+ inun_cols = [
1496
+ name
1497
+ for name in roads.columns
1498
+ if self.impact_columns.inundation_depth in name
1499
+ ]
1500
+ # Merge data
1501
+ roads = roads[[self.impact_columns.object_id, "geometry"] + inun_cols].merge(
1502
+ self.outputs["table"][
1503
+ [self.impact_columns.object_id, self.impact_columns.primary_object_type]
1504
+ + aggr_cols
1505
+ ],
1506
+ on=self.impact_columns.object_id,
1507
+ )
1508
+ # Save as geopackage
1509
+ roads.to_file(output_path, driver="GPKG")
1510
+
1511
+ @staticmethod
1512
+ def _ensure_correct_hash_spacing_in_csv(
1513
+ model_root: Path, hash_spacing: int = 1
1514
+ ) -> None:
1515
+ """
1516
+ Ensure that the CSV file has the correct number of spaces between hashes.
1517
+
1518
+ When writing csv files, FIAT does not add spaces between the hashes and the line, which leads to errors on linux.
1519
+
1520
+
1521
+ Parameters
1522
+ ----------
1523
+ file_path : Path
1524
+ The path to the model root.
1525
+ hash_spacing : int, optional
1526
+ The number of spaces between hashes, by default 1.
1527
+ """
1528
+ for dirpath, _, filenames in os.walk(model_root):
1529
+ for filename in filenames:
1530
+ if not filename.lower().endswith(".csv"):
1531
+ continue
1532
+ file_path = os.path.join(dirpath, filename)
1533
+
1534
+ with open(file_path, "r") as file:
1535
+ lines = file.readlines()
1536
+
1537
+ with open(file_path, "w") as file:
1538
+ for line in lines:
1539
+ if line.startswith("#"):
1540
+ line = "#" + " " * hash_spacing + line.lstrip("#")
1541
+ file.write(line)