flood-adapt 0.3.9__py3-none-any.whl → 0.3.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. flood_adapt/__init__.py +26 -22
  2. flood_adapt/adapter/__init__.py +9 -9
  3. flood_adapt/adapter/fiat_adapter.py +1541 -1541
  4. flood_adapt/adapter/interface/hazard_adapter.py +70 -70
  5. flood_adapt/adapter/interface/impact_adapter.py +36 -36
  6. flood_adapt/adapter/interface/model_adapter.py +89 -89
  7. flood_adapt/adapter/interface/offshore.py +19 -19
  8. flood_adapt/adapter/sfincs_adapter.py +1853 -1848
  9. flood_adapt/adapter/sfincs_offshore.py +187 -193
  10. flood_adapt/config/config.py +248 -248
  11. flood_adapt/config/fiat.py +219 -219
  12. flood_adapt/config/gui.py +331 -331
  13. flood_adapt/config/sfincs.py +481 -336
  14. flood_adapt/config/site.py +129 -129
  15. flood_adapt/database_builder/database_builder.py +2210 -2210
  16. flood_adapt/database_builder/templates/default_units/imperial.toml +9 -9
  17. flood_adapt/database_builder/templates/default_units/metric.toml +9 -9
  18. flood_adapt/database_builder/templates/green_infra_table/green_infra_lookup_table.csv +10 -10
  19. flood_adapt/database_builder/templates/infographics/OSM/config_charts.toml +90 -90
  20. flood_adapt/database_builder/templates/infographics/OSM/config_people.toml +57 -57
  21. flood_adapt/database_builder/templates/infographics/OSM/config_risk_charts.toml +121 -121
  22. flood_adapt/database_builder/templates/infographics/OSM/config_roads.toml +65 -65
  23. flood_adapt/database_builder/templates/infographics/OSM/styles.css +45 -45
  24. flood_adapt/database_builder/templates/infographics/US_NSI/config_charts.toml +126 -126
  25. flood_adapt/database_builder/templates/infographics/US_NSI/config_people.toml +60 -60
  26. flood_adapt/database_builder/templates/infographics/US_NSI/config_risk_charts.toml +121 -121
  27. flood_adapt/database_builder/templates/infographics/US_NSI/config_roads.toml +65 -65
  28. flood_adapt/database_builder/templates/infographics/US_NSI/styles.css +45 -45
  29. flood_adapt/database_builder/templates/infometrics/OSM/metrics_additional_risk_configs.toml +4 -4
  30. flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config.toml +143 -143
  31. flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config_risk.toml +153 -153
  32. flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config.toml +127 -127
  33. flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config_risk.toml +57 -57
  34. flood_adapt/database_builder/templates/infometrics/US_NSI/metrics_additional_risk_configs.toml +4 -4
  35. flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config.toml +191 -191
  36. flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config_risk.toml +153 -153
  37. flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config.toml +178 -178
  38. flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config_risk.toml +57 -57
  39. flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config.toml +9 -9
  40. flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config_risk.toml +65 -65
  41. flood_adapt/database_builder/templates/output_layers/bin_colors.toml +5 -5
  42. flood_adapt/database_builder.py +16 -16
  43. flood_adapt/dbs_classes/__init__.py +21 -21
  44. flood_adapt/dbs_classes/database.py +533 -684
  45. flood_adapt/dbs_classes/dbs_benefit.py +77 -76
  46. flood_adapt/dbs_classes/dbs_event.py +61 -59
  47. flood_adapt/dbs_classes/dbs_measure.py +112 -111
  48. flood_adapt/dbs_classes/dbs_projection.py +34 -34
  49. flood_adapt/dbs_classes/dbs_scenario.py +137 -137
  50. flood_adapt/dbs_classes/dbs_static.py +274 -273
  51. flood_adapt/dbs_classes/dbs_strategy.py +130 -129
  52. flood_adapt/dbs_classes/dbs_template.py +279 -278
  53. flood_adapt/dbs_classes/interface/database.py +107 -139
  54. flood_adapt/dbs_classes/interface/element.py +121 -121
  55. flood_adapt/dbs_classes/interface/static.py +47 -47
  56. flood_adapt/flood_adapt.py +1229 -1178
  57. flood_adapt/misc/database_user.py +16 -16
  58. flood_adapt/misc/exceptions.py +22 -0
  59. flood_adapt/misc/log.py +183 -183
  60. flood_adapt/misc/path_builder.py +54 -54
  61. flood_adapt/misc/utils.py +185 -185
  62. flood_adapt/objects/__init__.py +82 -82
  63. flood_adapt/objects/benefits/benefits.py +61 -61
  64. flood_adapt/objects/events/event_factory.py +135 -135
  65. flood_adapt/objects/events/event_set.py +88 -84
  66. flood_adapt/objects/events/events.py +236 -234
  67. flood_adapt/objects/events/historical.py +58 -58
  68. flood_adapt/objects/events/hurricane.py +68 -67
  69. flood_adapt/objects/events/synthetic.py +46 -50
  70. flood_adapt/objects/forcing/__init__.py +92 -92
  71. flood_adapt/objects/forcing/csv.py +68 -68
  72. flood_adapt/objects/forcing/discharge.py +66 -66
  73. flood_adapt/objects/forcing/forcing.py +150 -150
  74. flood_adapt/objects/forcing/forcing_factory.py +182 -182
  75. flood_adapt/objects/forcing/meteo_handler.py +93 -93
  76. flood_adapt/objects/forcing/netcdf.py +40 -40
  77. flood_adapt/objects/forcing/plotting.py +453 -429
  78. flood_adapt/objects/forcing/rainfall.py +98 -98
  79. flood_adapt/objects/forcing/tide_gauge.py +191 -191
  80. flood_adapt/objects/forcing/time_frame.py +90 -90
  81. flood_adapt/objects/forcing/timeseries.py +564 -564
  82. flood_adapt/objects/forcing/unit_system.py +580 -580
  83. flood_adapt/objects/forcing/waterlevels.py +108 -108
  84. flood_adapt/objects/forcing/wind.py +124 -124
  85. flood_adapt/objects/measures/measure_factory.py +92 -92
  86. flood_adapt/objects/measures/measures.py +551 -529
  87. flood_adapt/objects/object_model.py +74 -68
  88. flood_adapt/objects/projections/projections.py +103 -103
  89. flood_adapt/objects/scenarios/scenarios.py +22 -22
  90. flood_adapt/objects/strategies/strategies.py +89 -89
  91. flood_adapt/workflows/benefit_runner.py +579 -554
  92. flood_adapt/workflows/floodmap.py +85 -85
  93. flood_adapt/workflows/impacts_integrator.py +85 -85
  94. flood_adapt/workflows/scenario_runner.py +70 -70
  95. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/LICENSE +674 -674
  96. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/METADATA +867 -865
  97. flood_adapt-0.3.11.dist-info/RECORD +140 -0
  98. flood_adapt-0.3.9.dist-info/RECORD +0 -139
  99. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/WHEEL +0 -0
  100. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/top_level.txt +0 -0
@@ -1,684 +1,533 @@
1
- import gc
2
- import os
3
- import shutil
4
- import time
5
- from pathlib import Path
6
- from typing import Any, Literal, Optional, Union
7
-
8
- import geopandas as gpd
9
- import numpy as np
10
- import pandas as pd
11
- import xarray as xr
12
- from cht_cyclones.tropical_cyclone import TropicalCyclone
13
- from geopandas import GeoDataFrame
14
- from plotly.express import line
15
- from plotly.express.colors import sample_colorscale
16
-
17
- from flood_adapt.config.site import Site
18
- from flood_adapt.dbs_classes.dbs_benefit import DbsBenefit
19
- from flood_adapt.dbs_classes.dbs_event import DbsEvent
20
- from flood_adapt.dbs_classes.dbs_measure import DbsMeasure
21
- from flood_adapt.dbs_classes.dbs_projection import DbsProjection
22
- from flood_adapt.dbs_classes.dbs_scenario import DbsScenario
23
- from flood_adapt.dbs_classes.dbs_static import DbsStatic
24
- from flood_adapt.dbs_classes.dbs_strategy import DbsStrategy
25
- from flood_adapt.dbs_classes.interface.database import IDatabase
26
- from flood_adapt.misc.log import FloodAdaptLogging
27
- from flood_adapt.misc.path_builder import (
28
- TopLevelDir,
29
- db_path,
30
- )
31
- from flood_adapt.misc.utils import finished_file_exists
32
- from flood_adapt.objects.benefits.benefits import Benefit
33
- from flood_adapt.objects.events.events import Event
34
- from flood_adapt.objects.forcing import unit_system as us
35
- from flood_adapt.objects.scenarios.scenarios import Scenario
36
- from flood_adapt.workflows.benefit_runner import BenefitRunner
37
- from flood_adapt.workflows.scenario_runner import ScenarioRunner
38
-
39
-
40
- class Database(IDatabase):
41
- """Implementation of IDatabase class that holds the site information and has methods to get static data info, and all the input information.
42
-
43
- Additionally it can manipulate (add, edit, copy and delete) any of the objects in the input.
44
- """
45
-
46
- _instance = None
47
-
48
- database_path: Union[str, os.PathLike]
49
- database_name: str
50
- _init_done: bool = False
51
-
52
- base_path: Path
53
- input_path: Path
54
- static_path: Path
55
- output_path: Path
56
-
57
- _site: Site
58
-
59
- _events: DbsEvent
60
- _scenarios: DbsScenario
61
- _strategies: DbsStrategy
62
- _measures: DbsMeasure
63
- _projections: DbsProjection
64
- _benefits: DbsBenefit
65
-
66
- _static: DbsStatic
67
-
68
- def __new__(cls, *args, **kwargs):
69
- if not cls._instance: # Singleton pattern
70
- cls._instance = super(Database, cls).__new__(cls)
71
- return cls._instance
72
-
73
- def __init__(
74
- self,
75
- database_path: Union[str, os.PathLike, None] = None,
76
- database_name: Optional[str] = None,
77
- ) -> None:
78
- """
79
- Initialize the DatabaseController object.
80
-
81
- Parameters
82
- ----------
83
- database_path : Union[str, os.PathLike]
84
- The path to the database root
85
- database_name : str
86
- The name of the database.
87
- -----
88
- """
89
- if database_path is None or database_name is None:
90
- if not self._init_done:
91
- raise ValueError(
92
- """Database path and name must be provided for the first initialization.
93
- To do this, run `flood_adapt.api.static.read_database(database_path, site_name)` first."""
94
- )
95
- else:
96
- return # Skip re-initialization
97
-
98
- if (
99
- self._init_done
100
- and self.database_path == database_path
101
- and self.database_name == database_name
102
- ):
103
- return # Skip re-initialization
104
-
105
- # If the database is not initialized, or a new path or name is provided, (re-)initialize
106
- re_option = "re-" if self._init_done else ""
107
- self.logger = FloodAdaptLogging.getLogger("Database")
108
- self.logger.info(
109
- f"{re_option}initializing database to {database_name} at {database_path}".capitalize()
110
- )
111
- self.database_path = database_path
112
- self.database_name = database_name
113
-
114
- # Set the paths
115
-
116
- self.base_path = Path(database_path) / database_name
117
- self.input_path = db_path(TopLevelDir.input)
118
- self.static_path = db_path(TopLevelDir.static)
119
- self.output_path = db_path(TopLevelDir.output)
120
-
121
- self._site = Site.load_file(self.static_path / "config" / "site.toml")
122
-
123
- # Initialize the different database objects
124
- self._static = DbsStatic(self)
125
- self._events = DbsEvent(
126
- self, standard_objects=self.site.standard_objects.events
127
- )
128
- self._scenarios = DbsScenario(self)
129
- self._strategies = DbsStrategy(
130
- self, standard_objects=self.site.standard_objects.strategies
131
- )
132
- self._measures = DbsMeasure(self)
133
- self._projections = DbsProjection(
134
- self, standard_objects=self.site.standard_objects.projections
135
- )
136
- self._benefits = DbsBenefit(self)
137
-
138
- # Delete any unfinished/crashed scenario output
139
- self.cleanup()
140
-
141
- self._init_done = True
142
-
143
- def shutdown(self):
144
- """Explicitly shut down the singleton and clear all references."""
145
- import gc
146
-
147
- self._instance = None
148
- self._init_done = False
149
-
150
- self.__class__._instance = None
151
- self.__dict__.clear()
152
- gc.collect()
153
-
154
- # Property methods
155
- @property
156
- def site(self) -> Site:
157
- return self._site
158
-
159
- @property
160
- def static(self) -> DbsStatic:
161
- return self._static
162
-
163
- @property
164
- def events(self) -> DbsEvent:
165
- return self._events
166
-
167
- @property
168
- def scenarios(self) -> DbsScenario:
169
- return self._scenarios
170
-
171
- @property
172
- def strategies(self) -> DbsStrategy:
173
- return self._strategies
174
-
175
- @property
176
- def measures(self) -> DbsMeasure:
177
- return self._measures
178
-
179
- @property
180
- def projections(self) -> DbsProjection:
181
- return self._projections
182
-
183
- @property
184
- def benefits(self) -> DbsBenefit:
185
- return self._benefits
186
-
187
- def interp_slr(self, slr_scenario: str, year: float) -> float:
188
- """Interpolate SLR value and reference it to the SLR reference year from the site toml.
189
-
190
- Parameters
191
- ----------
192
- slr_scenario : str
193
- SLR scenario name from the coulmn names in static/slr/slr.csv
194
- year : float
195
- year to evaluate
196
-
197
- Returns
198
- -------
199
- float
200
- _description_
201
-
202
- Raises
203
- ------
204
- ValueError
205
- if the reference year is outside of the time range in the slr.csv file
206
- ValueError
207
- if the year to evaluate is outside of the time range in the slr.csv file
208
- """
209
- if self.site.sfincs.slr_scenarios is None:
210
- raise ValueError("No SLR scenarios defined in the site configuration.")
211
-
212
- input_file = self.static_path / self.site.sfincs.slr_scenarios.file
213
- df = pd.read_csv(input_file)
214
- if year > df["year"].max() or year < df["year"].min():
215
- raise ValueError(
216
- "The selected year is outside the range of the available SLR scenarios"
217
- )
218
- else:
219
- slr = np.interp(year, df["year"], df[slr_scenario])
220
- ref_year = self.site.sfincs.slr_scenarios.relative_to_year
221
- if ref_year > df["year"].max() or ref_year < df["year"].min():
222
- raise ValueError(
223
- f"The reference year {ref_year} is outside the range of the available SLR scenarios"
224
- )
225
- else:
226
- ref_slr = np.interp(ref_year, df["year"], df[slr_scenario])
227
- new_slr = us.UnitfulLength(
228
- value=slr - ref_slr,
229
- units=df["units"][0],
230
- )
231
- gui_units = self.site.gui.units.default_length_units
232
- return np.round(new_slr.convert(gui_units), decimals=2)
233
-
234
- # TODO: should probably be moved to frontend
235
- def plot_slr_scenarios(self) -> str:
236
- if self.site.sfincs.slr_scenarios is None:
237
- raise ValueError("No SLR scenarios defined in the site configuration.")
238
- input_file = self.input_path.parent.joinpath(
239
- "static", self.site.sfincs.slr_scenarios.file
240
- )
241
- df = pd.read_csv(input_file)
242
- ncolors = len(df.columns) - 2
243
- if "units" not in df.columns:
244
- raise ValueError(f"Expected column `units` in {input_file}.")
245
-
246
- units = df["units"].iloc[0]
247
- units = us.UnitTypesLength(units)
248
-
249
- if "Year" not in df.columns:
250
- if "year" not in df.columns:
251
- raise ValueError(f"Expected column `year` in {input_file}.")
252
- else:
253
- df = df.rename(columns={"year": "Year"})
254
-
255
- ref_year = self.site.sfincs.slr_scenarios.relative_to_year
256
- if ref_year > df["Year"].max() or ref_year < df["Year"].min():
257
- raise ValueError(
258
- f"The reference year {ref_year} is outside the range of the available SLR scenarios"
259
- )
260
- else:
261
- scenarios = self._static.get_slr_scn_names()
262
- for scn in scenarios:
263
- ref_slr = np.interp(ref_year, df["Year"], df[scn])
264
- df[scn] -= ref_slr
265
-
266
- df = df.drop(columns="units").melt(id_vars=["Year"]).reset_index(drop=True)
267
- # convert to units used in GUI
268
- slr_current_units = us.UnitfulLength(value=1.0, units=units)
269
- conversion_factor = slr_current_units.convert(
270
- self.site.gui.units.default_length_units
271
- )
272
- df.iloc[:, -1] = (conversion_factor * df.iloc[:, -1]).round(decimals=2)
273
-
274
- # rename column names that will be shown in html
275
- df = df.rename(
276
- columns={
277
- "variable": "Scenario",
278
- "value": f"Sea level rise [{self.site.gui.units.default_length_units.value}]",
279
- }
280
- )
281
-
282
- colors = sample_colorscale(
283
- "rainbow", [n / (ncolors - 1) for n in range(ncolors)]
284
- )
285
- fig = line(
286
- df,
287
- x="Year",
288
- y=f"Sea level rise [{self.site.gui.units.default_length_units.value}]",
289
- color="Scenario",
290
- color_discrete_sequence=colors,
291
- )
292
-
293
- # fig.update_traces(marker={"line": {"color": "#000000", "width": 2}})
294
-
295
- fig.update_layout(
296
- autosize=False,
297
- height=100 * 1.2,
298
- width=280 * 1.3,
299
- margin={"r": 0, "l": 0, "b": 0, "t": 0},
300
- font={"size": 10, "color": "black", "family": "Arial"},
301
- title_font={"size": 10, "color": "black", "family": "Arial"},
302
- legend_font={"size": 10, "color": "black", "family": "Arial"},
303
- legend_grouptitlefont={"size": 10, "color": "black", "family": "Arial"},
304
- legend={"entrywidthmode": "fraction", "entrywidth": 0.2},
305
- yaxis_title_font={"size": 10, "color": "black", "family": "Arial"},
306
- xaxis_title=None,
307
- xaxis_range=[ref_year, df["Year"].max()],
308
- legend_title=None,
309
- # paper_bgcolor="#3A3A3A",
310
- # plot_bgcolor="#131313",
311
- )
312
-
313
- # write html to results folder
314
- output_loc = self.input_path.parent.joinpath("temp", "slr.html")
315
- output_loc.parent.mkdir(parents=True, exist_ok=True)
316
- fig.write_html(output_loc)
317
- return str(output_loc)
318
-
319
- def write_to_csv(self, name: str, event: Event, df: pd.DataFrame):
320
- df.to_csv(
321
- self.events.input_path.joinpath(event.name, f"{name}.csv"),
322
- header=False,
323
- )
324
-
325
- def write_cyc(self, event: Event, track: TropicalCyclone):
326
- cyc_file = self.events.input_path / event.name / f"{event.track_name}.cyc"
327
- # cht_cyclone function to write TropicalCyclone as .cyc file
328
- track.write_track(filename=cyc_file, fmt="ddb_cyc")
329
-
330
- def check_benefit_scenarios(self, benefit: Benefit) -> pd.DataFrame:
331
- """Return a dataframe with the scenarios needed for this benefit assessment run.
332
-
333
- Parameters
334
- ----------
335
- benefit : Benefit
336
- """
337
- runner = BenefitRunner(self, benefit=benefit)
338
- return runner.scenarios
339
-
340
- def create_benefit_scenarios(self, benefit: Benefit) -> None:
341
- """Create any scenarios that are needed for the (cost-)benefit assessment and are not there already.
342
-
343
- Parameters
344
- ----------
345
- benefit : Benefit
346
- """
347
- runner = BenefitRunner(self, benefit=benefit)
348
-
349
- # Iterate through the scenarios needed and create them if not existing
350
- for index, row in runner.scenarios.iterrows():
351
- if row["scenario created"] == "No":
352
- scenario_dict = {}
353
- scenario_dict["event"] = row["event"]
354
- scenario_dict["projection"] = row["projection"]
355
- scenario_dict["strategy"] = row["strategy"]
356
- scenario_dict["name"] = "_".join(
357
- [row["projection"], row["event"], row["strategy"]]
358
- )
359
-
360
- scenario_obj = Scenario(**scenario_dict)
361
- # Check if scenario already exists (because it was created before in the loop)
362
- try:
363
- self.scenarios.save(scenario_obj)
364
- except ValueError as e:
365
- if "name is already used" not in str(e):
366
- # some other error was raised, so we re-raise it
367
- raise e
368
- # otherwise, if it already exists and we dont need to save it, we can just continue
369
-
370
- def run_benefit(self, benefit_name: Union[str, list[str]]) -> None:
371
- """Run a (cost-)benefit analysis.
372
-
373
- Parameters
374
- ----------
375
- benefit_name : Union[str, list[str]]
376
- name(s) of the benefits to run.
377
- """
378
- if not isinstance(benefit_name, list):
379
- benefit_name = [benefit_name]
380
- for name in benefit_name:
381
- benefit = self.benefits.get(name)
382
- runner = BenefitRunner(self, benefit=benefit)
383
- runner.run_cost_benefit()
384
-
385
- def get_outputs(self) -> dict[str, Any]:
386
- """Return a dictionary with info on the outputs that currently exist in the database.
387
-
388
- Returns
389
- -------
390
- dict[str, Any]
391
- Includes 'name', 'path', 'last_modification_date' and "finished" info
392
- """
393
- all_scenarios = pd.DataFrame(self._scenarios.summarize_objects())
394
- if len(all_scenarios) > 0:
395
- df = all_scenarios[all_scenarios["finished"]]
396
- else:
397
- df = all_scenarios
398
- finished = df.drop(columns="finished").reset_index(drop=True)
399
- return finished.to_dict()
400
-
401
- def get_topobathy_path(self) -> str:
402
- """Return the path of the topobathy tiles in order to create flood maps with water level maps.
403
-
404
- Returns
405
- -------
406
- str
407
- path to topobathy tiles
408
- """
409
- path = self.input_path.parent.joinpath("static", "dem", "tiles", "topobathy")
410
- return str(path)
411
-
412
- def get_index_path(self) -> str:
413
- """Return the path of the index tiles which are used to connect each water level cell with the topobathy tiles.
414
-
415
- Returns
416
- -------
417
- str
418
- path to index tiles
419
- """
420
- path = self.input_path.parent.joinpath("static", "dem", "tiles", "indices")
421
- return str(path)
422
-
423
- def get_depth_conversion(self) -> float:
424
- """Return the flood depth conversion that is need in the gui to plot the flood map.
425
-
426
- Returns
427
- -------
428
- float
429
- conversion factor
430
- """
431
- # Get conresion factor need to get from the sfincs units to the gui units
432
- units = us.UnitfulLength(
433
- value=1, units=self.site.gui.units.default_length_units
434
- )
435
- unit_cor = units.convert(new_units=us.UnitTypesLength.meters)
436
-
437
- return unit_cor
438
-
439
- def get_max_water_level(
440
- self,
441
- scenario_name: str,
442
- return_period: Optional[int] = None,
443
- ) -> np.ndarray:
444
- """Return an array with the maximum water levels during an event.
445
-
446
- Parameters
447
- ----------
448
- scenario_name : str
449
- name of scenario
450
- return_period : int, optional
451
- return period in years, by default None
452
-
453
- Returns
454
- -------
455
- np.array
456
- 2D map of maximum water levels
457
- """
458
- # If single event read with hydromt-sfincs
459
- if not return_period:
460
- map_path = self.scenarios.output_path.joinpath(
461
- scenario_name,
462
- "Flooding",
463
- "max_water_level_map.nc",
464
- )
465
- with xr.open_dataarray(map_path) as map:
466
- zsmax = map.to_numpy()
467
- else:
468
- file_path = self.scenarios.output_path.joinpath(
469
- scenario_name,
470
- "Flooding",
471
- f"RP_{return_period:04d}_maps.nc",
472
- )
473
- with xr.open_dataset(file_path) as ds:
474
- zsmax = ds["risk_map"][:, :].to_numpy().T
475
- return zsmax
476
-
477
- def get_building_footprints(self, scenario_name: str) -> GeoDataFrame:
478
- """Return a geodataframe of the impacts at the footprint level.
479
-
480
- Parameters
481
- ----------
482
- scenario_name : str
483
- name of scenario
484
-
485
- Returns
486
- -------
487
- GeoDataFrame
488
- impacts at footprint level
489
- """
490
- out_path = self.scenarios.output_path.joinpath(scenario_name, "Impacts")
491
- footprints = out_path / f"Impacts_building_footprints_{scenario_name}.gpkg"
492
- gdf = gpd.read_file(footprints, engine="pyogrio")
493
- gdf = gdf.to_crs(4326)
494
- return gdf
495
-
496
- def get_roads(self, scenario_name: str) -> GeoDataFrame:
497
- """Return a geodataframe of the impacts at roads.
498
-
499
- Parameters
500
- ----------
501
- scenario_name : str
502
- name of scenario
503
-
504
- Returns
505
- -------
506
- GeoDataFrame
507
- Impacts at roads
508
- """
509
- out_path = self.scenarios.output_path.joinpath(scenario_name, "Impacts")
510
- roads = out_path / f"Impacts_roads_{scenario_name}.gpkg"
511
- gdf = gpd.read_file(roads, engine="pyogrio")
512
- gdf = gdf.to_crs(4326)
513
- return gdf
514
-
515
- def get_aggregation(self, scenario_name: str) -> dict[str, gpd.GeoDataFrame]:
516
- """Return a dictionary with the aggregated impacts as geodataframes.
517
-
518
- Parameters
519
- ----------
520
- scenario_name : str
521
- name of the scenario
522
-
523
- Returns
524
- -------
525
- dict[GeoDataFrame]
526
- dictionary with aggregated damages per aggregation type
527
- """
528
- out_path = self.scenarios.output_path.joinpath(scenario_name, "Impacts")
529
- gdfs = {}
530
- for aggr_area in out_path.glob(f"Impacts_aggregated_{scenario_name}_*.gpkg"):
531
- label = aggr_area.stem.split(f"{scenario_name}_")[-1]
532
- gdfs[label] = gpd.read_file(aggr_area, engine="pyogrio")
533
- gdfs[label] = gdfs[label].to_crs(4326)
534
- return gdfs
535
-
536
- def get_aggregation_benefits(
537
- self, benefit_name: str
538
- ) -> dict[str, gpd.GeoDataFrame]:
539
- """Get a dictionary with the aggregated benefits as geodataframes.
540
-
541
- Parameters
542
- ----------
543
- benefit_name : str
544
- name of the benefit analysis
545
-
546
- Returns
547
- -------
548
- dict[GeoDataFrame]
549
- dictionary with aggregated benefits per aggregation type
550
- """
551
- out_path = self.benefits.output_path.joinpath(
552
- benefit_name,
553
- )
554
- gdfs = {}
555
- for aggr_area in out_path.glob("benefits_*.gpkg"):
556
- label = aggr_area.stem.split("benefits_")[-1]
557
- gdfs[label] = gpd.read_file(aggr_area, engine="pyogrio")
558
- gdfs[label] = gdfs[label].to_crs(4326)
559
- return gdfs
560
-
561
- def get_object_list(
562
- self,
563
- object_type: Literal[
564
- "projections", "events", "measures", "strategies", "scenarios", "benefits"
565
- ],
566
- ) -> dict[str, Any]:
567
- """Get a dictionary with all the toml paths and last modification dates that exist in the database that correspond to object_type.
568
-
569
- Parameters
570
- ----------
571
- object_type : str
572
- Can be 'projections', 'events', 'measures', 'strategies' or 'scenarios'
573
-
574
- Returns
575
- -------
576
- dict[str, Any]
577
- Includes 'path' and 'last_modification_date' info
578
- """
579
- match object_type:
580
- case "projections":
581
- return self.projections.summarize_objects()
582
- case "events":
583
- return self.events.summarize_objects()
584
- case "measures":
585
- return self.measures.summarize_objects()
586
- case "strategies":
587
- return self.strategies.summarize_objects()
588
- case "scenarios":
589
- return self.scenarios.summarize_objects()
590
- case "benefits":
591
- return self.benefits.summarize_objects()
592
- case _:
593
- raise ValueError(
594
- f"Object type '{object_type}' is not valid. Must be one of 'projections', 'events', 'measures', 'strategies' or 'scenarios'."
595
- )
596
-
597
- def has_run_hazard(self, scenario_name: str) -> None:
598
- """Check if there is already a simulation that has the exact same hazard component.
599
-
600
- If yes that is copied to avoid running the hazard model twice.
601
-
602
- Parameters
603
- ----------
604
- scenario_name : str
605
- name of the scenario to check if needs to be rerun for hazard
606
- """
607
- scenario = self.scenarios.get(scenario_name)
608
- runner = ScenarioRunner(self, scenario=scenario)
609
-
610
- # Dont do anything if the hazard model has already been run in itself
611
- if runner.impacts.hazard.has_run:
612
- return
613
-
614
- scenarios = [
615
- self.scenarios.get(scn)
616
- for scn in self.scenarios.summarize_objects()["name"]
617
- ]
618
- scns_simulated = [
619
- sim
620
- for sim in scenarios
621
- if self.scenarios.output_path.joinpath(sim.name, "Flooding").is_dir()
622
- ]
623
-
624
- for scn in scns_simulated:
625
- if self.scenarios.equal_hazard_components(scn, scenario):
626
- existing = self.scenarios.output_path.joinpath(scn.name, "Flooding")
627
- path_new = self.scenarios.output_path.joinpath(
628
- scenario.name, "Flooding"
629
- )
630
- _runner = ScenarioRunner(self, scenario=scn)
631
-
632
- if _runner.impacts.hazard.has_run: # only copy results if the hazard model has actually finished and skip simulation folders
633
- shutil.copytree(
634
- existing,
635
- path_new,
636
- dirs_exist_ok=True,
637
- ignore=shutil.ignore_patterns("simulations"),
638
- )
639
- self.logger.info(
640
- f"Hazard simulation is used from the '{scn.name}' scenario"
641
- )
642
-
643
- def cleanup(self) -> None:
644
- """
645
- Remove corrupted scenario output.
646
-
647
- This method removes any scenario output that:
648
- - is corrupted due to unfinished runs
649
- - does not have a corresponding input
650
-
651
- """
652
- if not self.scenarios.output_path.is_dir():
653
- return
654
-
655
- input_scenarios = [
656
- (self.scenarios.input_path / dir).resolve()
657
- for dir in os.listdir(self.scenarios.input_path)
658
- ]
659
- output_scenarios = [
660
- (self.scenarios.output_path / dir).resolve()
661
- for dir in os.listdir(self.scenarios.output_path)
662
- ]
663
-
664
- def _call_garbage_collector(func, path, exc_info, retries=5, delay=0.1):
665
- """Retry deletion up to 5 times if the file is locked."""
666
- for attempt in range(retries):
667
- gc.collect()
668
- time.sleep(delay)
669
- try:
670
- func(path) # Retry deletion
671
- return # Exit if successful
672
- except Exception as e:
673
- print(
674
- f"Attempt {attempt + 1}/{retries} failed to delete {path}: {e}"
675
- )
676
-
677
- print(f"Giving up on deleting {path} after {retries} attempts.")
678
-
679
- for dir in output_scenarios:
680
- # Delete if: input was deleted or corrupted output due to unfinished run
681
- if dir.name not in [
682
- path.name for path in input_scenarios
683
- ] or not finished_file_exists(dir):
684
- shutil.rmtree(dir, onerror=_call_garbage_collector)
1
+ import gc
2
+ import os
3
+ import shutil
4
+ import time
5
+ from pathlib import Path
6
+ from typing import Any, Literal, Optional, Union
7
+
8
+ import geopandas as gpd
9
+ import numpy as np
10
+ import pandas as pd
11
+ import xarray as xr
12
+ from geopandas import GeoDataFrame
13
+
14
+ from flood_adapt.config.sfincs import SlrScenariosModel
15
+ from flood_adapt.config.site import Site
16
+ from flood_adapt.dbs_classes.dbs_benefit import DbsBenefit
17
+ from flood_adapt.dbs_classes.dbs_event import DbsEvent
18
+ from flood_adapt.dbs_classes.dbs_measure import DbsMeasure
19
+ from flood_adapt.dbs_classes.dbs_projection import DbsProjection
20
+ from flood_adapt.dbs_classes.dbs_scenario import DbsScenario
21
+ from flood_adapt.dbs_classes.dbs_static import DbsStatic
22
+ from flood_adapt.dbs_classes.dbs_strategy import DbsStrategy
23
+ from flood_adapt.dbs_classes.interface.database import IDatabase
24
+ from flood_adapt.misc.exceptions import DatabaseError
25
+ from flood_adapt.misc.log import FloodAdaptLogging
26
+ from flood_adapt.misc.path_builder import (
27
+ TopLevelDir,
28
+ db_path,
29
+ )
30
+ from flood_adapt.misc.utils import finished_file_exists
31
+ from flood_adapt.objects.forcing import unit_system as us
32
+ from flood_adapt.workflows.scenario_runner import ScenarioRunner
33
+
34
+
35
class Database(IDatabase):
    """Implementation of IDatabase class that holds the site information and has methods to get static data info, and all the input information.

    Additionally it can manipulate (add, edit, copy and delete) any of the objects in the input.
    """

    # Class-level singleton holder; managed by __new__ and shutdown().
    _instance = None

    # Root folder that contains the database and the database's name
    # (the database lives at database_path / database_name).
    database_path: Union[str, os.PathLike]
    database_name: str
    # Guards against re-running __init__ on the singleton.
    _init_done: bool = False

    # Resolved folder layout, set during __init__.
    base_path: Path
    input_path: Path
    static_path: Path
    output_path: Path

    # Parsed site configuration (loaded from static/config/site.toml).
    _site: Site

    # Per-object-type database accessors, one per input category.
    _events: DbsEvent
    _scenarios: DbsScenario
    _strategies: DbsStrategy
    _measures: DbsMeasure
    _projections: DbsProjection
    _benefits: DbsBenefit

    # Accessor for static (read-only) data.
    _static: DbsStatic

+ def __new__(cls, *args, **kwargs):
64
+ if not cls._instance: # Singleton pattern
65
+ cls._instance = super(Database, cls).__new__(cls)
66
+ return cls._instance
67
+
68
    def __init__(
        self,
        database_path: Union[str, os.PathLike, None] = None,
        database_name: Optional[str] = None,
    ) -> None:
        """
        Initialize the DatabaseController object.

        Because ``Database`` is a singleton (see ``__new__``), this method may
        be invoked multiple times; the body only (re-)initializes when a new
        path/name pair is supplied.

        Parameters
        ----------
        database_path : Union[str, os.PathLike]
            The path to the database root
        database_name : str
            The name of the database.

        Raises
        ------
        DatabaseError
            If called without path/name before the first initialization.
        """
        # Calling Database() with no arguments is only valid once a database
        # has already been read; otherwise we cannot know what to load.
        if database_path is None or database_name is None:
            if not self._init_done:
                raise DatabaseError(
                    """Database path and name must be provided for the first initialization.
                    To do this, run `flood_adapt.api.static.read_database(database_path, site_name)` first."""
                )
            else:
                return  # Skip re-initialization

        # Same database requested again: nothing to do.
        if (
            self._init_done
            and self.database_path == database_path
            and self.database_name == database_name
        ):
            return  # Skip re-initialization

        # If the database is not initialized, or a new path or name is provided, (re-)initialize
        re_option = "re-" if self._init_done else ""
        self.logger = FloodAdaptLogging.getLogger("Database")
        self.logger.info(
            f"{re_option}initializing database to {database_name} at {database_path}".capitalize()
        )
        self.database_path = database_path
        self.database_name = database_name

        # Set the paths
        # NOTE(review): db_path() presumably derives these from process-wide
        # state configured elsewhere - confirm it reflects base_path.
        self.base_path = Path(database_path) / database_name
        self.input_path = db_path(TopLevelDir.input)
        self.static_path = db_path(TopLevelDir.static)
        self.output_path = db_path(TopLevelDir.output)

        self._site = Site.load_file(self.static_path / "config" / "site.toml")

        # Initialize the different database objects
        self._static = DbsStatic(self)
        self._events = DbsEvent(
            self, standard_objects=self.site.standard_objects.events
        )
        self._scenarios = DbsScenario(self)
        self._strategies = DbsStrategy(
            self, standard_objects=self.site.standard_objects.strategies
        )
        self._measures = DbsMeasure(self)
        self._projections = DbsProjection(
            self, standard_objects=self.site.standard_objects.projections
        )
        self._benefits = DbsBenefit(self)

        # Delete any unfinished/crashed scenario output
        self.cleanup()

        self._init_done = True

+ def shutdown(self):
139
+ """Explicitly shut down the singleton and clear all references."""
140
+ import gc
141
+
142
+ self._instance = None
143
+ self._init_done = False
144
+
145
+ self.__class__._instance = None
146
+ self.__dict__.clear()
147
+ gc.collect()
148
+
149
    # Property methods

    @property
    def site(self) -> Site:
        """Parsed site configuration (static/config/site.toml)."""
        return self._site

    @property
    def static(self) -> DbsStatic:
        """Accessor for static (read-only) database data."""
        return self._static

    @property
    def events(self) -> DbsEvent:
        """Accessor for event objects in the database input."""
        return self._events

    @property
    def scenarios(self) -> DbsScenario:
        """Accessor for scenario objects in the database input."""
        return self._scenarios

    @property
    def strategies(self) -> DbsStrategy:
        """Accessor for strategy objects in the database input."""
        return self._strategies

    @property
    def measures(self) -> DbsMeasure:
        """Accessor for measure objects in the database input."""
        return self._measures

    @property
    def projections(self) -> DbsProjection:
        """Accessor for projection objects in the database input."""
        return self._projections

    @property
    def benefits(self) -> DbsBenefit:
        """Accessor for benefit-analysis objects in the database input."""
        return self._benefits

+ def get_slr_scenarios(self) -> SlrScenariosModel:
183
+ """Get the path to the SLR scenarios file.
184
+
185
+ Returns
186
+ -------
187
+ SlrScenariosModel
188
+ SLR scenarios configuration model with the file path set to the static path.
189
+ """
190
+ if self.site.sfincs.slr_scenarios is None:
191
+ raise DatabaseError("No SLR scenarios defined in the site configuration.")
192
+ slr = self.site.sfincs.slr_scenarios
193
+ slr.file = str(self.static_path / slr.file)
194
+ return slr
195
+
196
+ def get_outputs(self) -> dict[str, Any]:
197
+ """Return a dictionary with info on the outputs that currently exist in the database.
198
+
199
+ Returns
200
+ -------
201
+ dict[str, Any]
202
+ Includes 'name', 'path', 'last_modification_date' and "finished" info
203
+ """
204
+ all_scenarios = pd.DataFrame(self._scenarios.summarize_objects())
205
+ if len(all_scenarios) > 0:
206
+ df = all_scenarios[all_scenarios["finished"]]
207
+ else:
208
+ df = all_scenarios
209
+ finished = df.drop(columns="finished").reset_index(drop=True)
210
+ return finished.to_dict()
211
+
212
+ def get_topobathy_path(self) -> str:
213
+ """Return the path of the topobathy tiles in order to create flood maps with water level maps.
214
+
215
+ Returns
216
+ -------
217
+ str
218
+ path to topobathy tiles
219
+ """
220
+ path = self.input_path.parent.joinpath("static", "dem", "tiles", "topobathy")
221
+ return str(path)
222
+
223
+ def get_index_path(self) -> str:
224
+ """Return the path of the index tiles which are used to connect each water level cell with the topobathy tiles.
225
+
226
+ Returns
227
+ -------
228
+ str
229
+ path to index tiles
230
+ """
231
+ path = self.input_path.parent.joinpath("static", "dem", "tiles", "indices")
232
+ return str(path)
233
+
234
+ def get_depth_conversion(self) -> float:
235
+ """Return the flood depth conversion that is need in the gui to plot the flood map.
236
+
237
+ Returns
238
+ -------
239
+ float
240
+ conversion factor
241
+ """
242
+ # Get conresion factor need to get from the sfincs units to the gui units
243
+ units = us.UnitfulLength(
244
+ value=1, units=self.site.gui.units.default_length_units
245
+ )
246
+ unit_cor = units.convert(new_units=us.UnitTypesLength.meters)
247
+
248
+ return unit_cor
249
+
250
+ def get_max_water_level(
251
+ self,
252
+ scenario_name: str,
253
+ return_period: Optional[int] = None,
254
+ ) -> np.ndarray:
255
+ """Return an array with the maximum water levels during an event.
256
+
257
+ Parameters
258
+ ----------
259
+ scenario_name : str
260
+ name of scenario
261
+ return_period : int, optional
262
+ return period in years, by default None
263
+
264
+ Returns
265
+ -------
266
+ np.array
267
+ 2D map of maximum water levels
268
+ """
269
+ # If single event read with hydromt-sfincs
270
+ if not return_period:
271
+ map_path = self.scenarios.output_path.joinpath(
272
+ scenario_name,
273
+ "Flooding",
274
+ "max_water_level_map.nc",
275
+ )
276
+ with xr.open_dataarray(map_path) as map:
277
+ zsmax = map.to_numpy()
278
+ else:
279
+ file_path = self.scenarios.output_path.joinpath(
280
+ scenario_name,
281
+ "Flooding",
282
+ f"RP_{return_period:04d}_maps.nc",
283
+ )
284
+ with xr.open_dataset(file_path) as ds:
285
+ zsmax = ds["risk_map"][:, :].to_numpy().T
286
+ return zsmax
287
+
288
+ def get_flood_map_geotiff(
289
+ self,
290
+ scenario_name: str,
291
+ return_period: Optional[int] = None,
292
+ ) -> Optional[Path]:
293
+ """Return the path to the geotiff file with the flood map for the given scenario.
294
+
295
+ Parameters
296
+ ----------
297
+ scenario_name : str
298
+ name of scenario
299
+ return_period : int, optional
300
+ return period in years, by default None. Only for risk scenarios.
301
+
302
+ Returns
303
+ -------
304
+ Optional[Path]
305
+ path to the flood map geotiff file, or None if it does not exist
306
+ """
307
+ if not return_period:
308
+ file_path = self.scenarios.output_path.joinpath(
309
+ scenario_name,
310
+ "Flooding",
311
+ f"FloodMap_{scenario_name}.tif",
312
+ )
313
+ else:
314
+ file_path = self.scenarios.output_path.joinpath(
315
+ scenario_name,
316
+ "Flooding",
317
+ f"RP_{return_period:04d}_maps.tif",
318
+ )
319
+ if not file_path.is_file():
320
+ self.logger.warning(
321
+ f"Flood map for scenario '{scenario_name}' at {file_path} does not exist."
322
+ )
323
+ return None
324
+ return file_path
325
+
326
+ def get_building_footprints(self, scenario_name: str) -> GeoDataFrame:
327
+ """Return a geodataframe of the impacts at the footprint level.
328
+
329
+ Parameters
330
+ ----------
331
+ scenario_name : str
332
+ name of scenario
333
+
334
+ Returns
335
+ -------
336
+ GeoDataFrame
337
+ impacts at footprint level
338
+ """
339
+ out_path = self.scenarios.output_path.joinpath(scenario_name, "Impacts")
340
+ footprints = out_path / f"Impacts_building_footprints_{scenario_name}.gpkg"
341
+ gdf = gpd.read_file(footprints, engine="pyogrio")
342
+ gdf = gdf.to_crs(4326)
343
+ return gdf
344
+
345
+ def get_roads(self, scenario_name: str) -> GeoDataFrame:
346
+ """Return a geodataframe of the impacts at roads.
347
+
348
+ Parameters
349
+ ----------
350
+ scenario_name : str
351
+ name of scenario
352
+
353
+ Returns
354
+ -------
355
+ GeoDataFrame
356
+ Impacts at roads
357
+ """
358
+ out_path = self.scenarios.output_path.joinpath(scenario_name, "Impacts")
359
+ roads = out_path / f"Impacts_roads_{scenario_name}.gpkg"
360
+ gdf = gpd.read_file(roads, engine="pyogrio")
361
+ gdf = gdf.to_crs(4326)
362
+ return gdf
363
+
364
+ def get_aggregation(self, scenario_name: str) -> dict[str, gpd.GeoDataFrame]:
365
+ """Return a dictionary with the aggregated impacts as geodataframes.
366
+
367
+ Parameters
368
+ ----------
369
+ scenario_name : str
370
+ name of the scenario
371
+
372
+ Returns
373
+ -------
374
+ dict[GeoDataFrame]
375
+ dictionary with aggregated damages per aggregation type
376
+ """
377
+ out_path = self.scenarios.output_path.joinpath(scenario_name, "Impacts")
378
+ gdfs = {}
379
+ for aggr_area in out_path.glob(f"Impacts_aggregated_{scenario_name}_*.gpkg"):
380
+ label = aggr_area.stem.split(f"{scenario_name}_")[-1]
381
+ gdfs[label] = gpd.read_file(aggr_area, engine="pyogrio")
382
+ gdfs[label] = gdfs[label].to_crs(4326)
383
+ return gdfs
384
+
385
+ def get_aggregation_benefits(
386
+ self, benefit_name: str
387
+ ) -> dict[str, gpd.GeoDataFrame]:
388
+ """Get a dictionary with the aggregated benefits as geodataframes.
389
+
390
+ Parameters
391
+ ----------
392
+ benefit_name : str
393
+ name of the benefit analysis
394
+
395
+ Returns
396
+ -------
397
+ dict[GeoDataFrame]
398
+ dictionary with aggregated benefits per aggregation type
399
+ """
400
+ out_path = self.benefits.output_path.joinpath(
401
+ benefit_name,
402
+ )
403
+ gdfs = {}
404
+ for aggr_area in out_path.glob("benefits_*.gpkg"):
405
+ label = aggr_area.stem.split("benefits_")[-1]
406
+ gdfs[label] = gpd.read_file(aggr_area, engine="pyogrio")
407
+ gdfs[label] = gdfs[label].to_crs(4326)
408
+ return gdfs
409
+
410
+ def get_object_list(
411
+ self,
412
+ object_type: Literal[
413
+ "projections", "events", "measures", "strategies", "scenarios", "benefits"
414
+ ],
415
+ ) -> dict[str, Any]:
416
+ """Get a dictionary with all the toml paths and last modification dates that exist in the database that correspond to object_type.
417
+
418
+ Parameters
419
+ ----------
420
+ object_type : str
421
+ Can be 'projections', 'events', 'measures', 'strategies' or 'scenarios'
422
+
423
+ Returns
424
+ -------
425
+ dict[str, Any]
426
+ Includes 'path' and 'last_modification_date' info
427
+ """
428
+ match object_type:
429
+ case "projections":
430
+ return self.projections.summarize_objects()
431
+ case "events":
432
+ return self.events.summarize_objects()
433
+ case "measures":
434
+ return self.measures.summarize_objects()
435
+ case "strategies":
436
+ return self.strategies.summarize_objects()
437
+ case "scenarios":
438
+ return self.scenarios.summarize_objects()
439
+ case "benefits":
440
+ return self.benefits.summarize_objects()
441
+ case _:
442
+ raise DatabaseError(
443
+ f"Object type '{object_type}' is not valid. Must be one of 'projections', 'events', 'measures', 'strategies' or 'scenarios'."
444
+ )
445
+
446
    def has_run_hazard(self, scenario_name: str) -> None:
        """Check if there is already a simulation that has the exact same hazard component.

        If yes that is copied to avoid running the hazard model twice.

        Parameters
        ----------
        scenario_name : str
            name of the scenario to check if needs to be rerun for hazard
        """
        scenario = self.scenarios.get(scenario_name)
        runner = ScenarioRunner(self, scenario=scenario)

        # Dont do anything if the hazard model has already been run in itself
        if runner.impacts.hazard.has_run:
            return

        # All scenarios known to the database ...
        scenarios = [
            self.scenarios.get(scn)
            for scn in self.scenarios.summarize_objects()["name"]
        ]
        # ... narrowed to those that have a Flooding output folder on disk.
        scns_simulated = [
            sim
            for sim in scenarios
            if self.scenarios.output_path.joinpath(sim.name, "Flooding").is_dir()
        ]

        # NOTE(review): every matching scenario is copied (no break), so a
        # later match overwrites an earlier one via dirs_exist_ok=True.
        for scn in scns_simulated:
            if self.scenarios.equal_hazard_components(scn, scenario):
                existing = self.scenarios.output_path.joinpath(scn.name, "Flooding")
                path_new = self.scenarios.output_path.joinpath(
                    scenario.name, "Flooding"
                )
                _runner = ScenarioRunner(self, scenario=scn)

                if _runner.impacts.hazard.has_run:  # only copy results if the hazard model has actually finished and skip simulation folders
                    shutil.copytree(
                        existing,
                        path_new,
                        dirs_exist_ok=True,
                        ignore=shutil.ignore_patterns("simulations"),
                    )
                    self.logger.info(
                        f"Hazard simulation is used from the '{scn.name}' scenario"
                    )

    def cleanup(self) -> None:
        """
        Remove corrupted scenario output.

        This method removes any scenario output that:
        - is corrupted due to unfinished runs
        - does not have a corresponding input

        """
        # Nothing to clean before any scenario has produced output.
        if not self.scenarios.output_path.is_dir():
            return

        input_scenarios = [
            (self.scenarios.input_path / dir).resolve()
            for dir in os.listdir(self.scenarios.input_path)
        ]
        output_scenarios = [
            (self.scenarios.output_path / dir).resolve()
            for dir in os.listdir(self.scenarios.output_path)
        ]

        def _call_garbage_collector(func, path, exc_info, retries=5, delay=0.1):
            """Retry deletion up to 5 times if the file is locked.

            shutil.rmtree onerror callback: gc.collect() drops lingering
            references (e.g. open file handles) that keep files locked on
            Windows, then the failing operation is retried.
            """
            for attempt in range(retries):
                gc.collect()
                time.sleep(delay)
                try:
                    func(path)  # Retry deletion
                    return  # Exit if successful
                except Exception as e:
                    print(
                        f"Attempt {attempt + 1}/{retries} failed to delete {path}: {e}"
                    )

            print(f"Giving up on deleting {path} after {retries} attempts.")

        for dir in output_scenarios:
            # Delete if: input was deleted or corrupted output due to unfinished run
            if dir.name not in [
                path.name for path in input_scenarios
            ] or not finished_file_exists(dir):
                shutil.rmtree(dir, onerror=_call_garbage_collector)