flood-adapt 1.0.0rc1__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flood_adapt/__init__.py +1 -1
- flood_adapt/adapter/__init__.py +0 -2
- flood_adapt/adapter/fiat_adapter.py +51 -51
- flood_adapt/adapter/interface/impact_adapter.py +1 -1
- flood_adapt/adapter/sfincs_adapter.py +69 -71
- flood_adapt/adapter/sfincs_offshore.py +5 -6
- flood_adapt/database_builder/database_builder.py +37 -5
- flood_adapt/dbs_classes/database.py +85 -21
- flood_adapt/dbs_classes/dbs_static.py +33 -9
- flood_adapt/dbs_classes/interface/static.py +8 -0
- flood_adapt/flood_adapt.py +28 -24
- flood_adapt/objects/forcing/forcing.py +1 -4
- flood_adapt/objects/forcing/tide_gauge.py +6 -7
- flood_adapt/objects/forcing/timeseries.py +0 -1
- flood_adapt/objects/output/floodmap.py +13 -0
- flood_adapt/workflows/scenario_runner.py +57 -31
- {flood_adapt-1.0.0rc1.dist-info → flood_adapt-1.0.2.dist-info}/LICENSE +69 -46
- {flood_adapt-1.0.0rc1.dist-info → flood_adapt-1.0.2.dist-info}/METADATA +70 -47
- {flood_adapt-1.0.0rc1.dist-info → flood_adapt-1.0.2.dist-info}/RECORD +21 -22
- flood_adapt/workflows/floodmap.py +0 -85
- flood_adapt/workflows/impacts_integrator.py +0 -85
- {flood_adapt-1.0.0rc1.dist-info → flood_adapt-1.0.2.dist-info}/WHEEL +0 -0
- {flood_adapt-1.0.0rc1.dist-info → flood_adapt-1.0.2.dist-info}/top_level.txt +0 -0
flood_adapt/dbs_classes/database.py CHANGED
@@ -12,6 +12,7 @@ import xarray as xr
 from geopandas import GeoDataFrame
 
 from flood_adapt.config.hazard import SlrScenariosModel
+from flood_adapt.config.impacts import FloodmapType
 from flood_adapt.config.site import Site
 from flood_adapt.dbs_classes.dbs_benefit import DbsBenefit
 from flood_adapt.dbs_classes.dbs_event import DbsEvent
@@ -30,8 +31,11 @@ from flood_adapt.misc.path_builder import (
 from flood_adapt.misc.utils import finished_file_exists
 from flood_adapt.objects.events.events import Mode
 from flood_adapt.objects.forcing import unit_system as us
+from flood_adapt.objects.output.floodmap import FloodMap
 from flood_adapt.workflows.scenario_runner import ScenarioRunner
 
+logger = FloodAdaptLogging.getLogger("Database")
+
 
 class Database(IDatabase):
     """Implementation of IDatabase class that holds the site information and has methods to get static data info, and all the input information.
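A change that recurs throughout this release: per-instance `self.logger` attributes are dropped in favor of one module-level logger per file. A minimal sketch of the pattern, using the names from this file (the method `example_method` is hypothetical, standing in for any method in the class):

    from flood_adapt.misc.log import FloodAdaptLogging

    logger = FloodAdaptLogging.getLogger("Database")

    class Database(IDatabase):
        def example_method(self) -> None:
            # Hypothetical method: before this release the call would have been
            # self.logger.info(...); now all methods share the module logger.
            logger.info("message")

The same swap appears below in flood_adapt.py, forcing.py, tide_gauge.py, and scenario_runner.py; in the pydantic models it also removes the `logger` class attribute entirely.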
@@ -100,8 +104,7 @@ class Database(IDatabase):
 
         # If the database is not initialized, or a new path or name is provided, (re-)initialize
         re_option = "re-" if self._init_done else ""
-
-        self.logger.info(
+        logger.info(
             f"{re_option}initializing database to {database_name} at {database_path}".capitalize()
         )
         self.database_path = database_path
@@ -209,6 +212,71 @@ class Database(IDatabase):
         finished = df.drop(columns="finished").reset_index(drop=True)
         return finished.to_dict()
 
+    def get_floodmap(self, scenario_name: str) -> FloodMap:
+        """Return the flood map for a given scenario.
+
+        Parameters
+        ----------
+        scenario_name : str
+            Name of the scenario
+
+        Returns
+        -------
+        FloodMap
+            Flood map object containing the paths to the flood map files and their type.
+        """
+        _type = self.site.fiat.config.floodmap_type
+        event = self.scenarios.get(scenario_name).event
+        mode = self.events.get(event).mode
+        base_dir = self.scenarios.output_path / scenario_name / "Flooding"
+
+        if mode == Mode.single_event:
+            if _type == FloodmapType.water_level:
+                paths = [base_dir / "max_water_level_map.nc"]
+            elif _type == FloodmapType.water_depth:
+                paths = [base_dir / f"FloodMap_{self.name}.tif"]
+        elif mode == Mode.risk:
+            if _type == FloodmapType.water_level:
+                paths = list(base_dir.glob("RP_*_maps.nc"))
+            elif _type == FloodmapType.water_depth:
+                paths = list(base_dir.glob("RP_*_maps.tif"))
+        else:
+            raise DatabaseError(
+                f"Flood map type '{_type}' is not valid. Must be one of 'water_level' or 'water_depth'."
+            )
+
+        return FloodMap(name=scenario_name, map_type=_type, mode=mode, paths=paths)
+
+    def get_impacts_path(self, scenario_name: str) -> Path:
+        """Return the path to the impacts folder containing the impact runs for a given scenario.
+
+        Parameters
+        ----------
+        scenario_name : str
+            Name of the scenario
+
+        Returns
+        -------
+        Path
+            Path to the impacts folder for the given scenario
+        """
+        return self.scenarios.output_path.joinpath(scenario_name, "Impacts")
+
+    def get_flooding_path(self, scenario_name: str) -> Path:
+        """Return the path to the flooding folder containing the hazard runs for a given scenario.
+
+        Parameters
+        ----------
+        scenario_name : str
+            Name of the scenario
+
+        Returns
+        -------
+        Path
+            Path to the flooding folder for the given scenario
+        """
+        return self.scenarios.output_path.joinpath(scenario_name, "Flooding")
+
     def get_topobathy_path(self) -> str:
         """Return the path of the topobathy tiles in order to create flood maps with water level maps.
 
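The three methods above give programmatic access to scenario outputs. A hedged usage sketch (database location, site name, and scenario name are illustrative, not from this diff; the Database constructor keywords follow the call shown later in flood_adapt.py):

    from flood_adapt.dbs_classes.database import Database

    db = Database(database_path="path/to/databases", database_name="my_site")

    flood_map = db.get_floodmap("my_scenario")
    print(flood_map.map_type, flood_map.mode)  # FloodmapType and Mode enum members
    for p in flood_map.paths:  # one .nc/.tif for single events, RP_* files in risk mode
        print(p)

    impacts_dir = db.get_impacts_path("my_scenario")    # <output>/my_scenario/Impacts
    flooding_dir = db.get_flooding_path("my_scenario")  # <output>/my_scenario/Flooding

One caveat visible in the code: the `else` that raises DatabaseError is attached to the mode chain, so an unrecognized FloodmapType under a recognized mode would leave `paths` unbound rather than raising the "not valid" error.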
@@ -317,7 +385,7 @@
             f"RP_{return_period:04d}_maps.tif",
         )
         if not file_path.is_file():
-            self.logger.warning(
+            logger.warning(
                 f"Flood map for scenario '{scenario_name}' at {file_path} does not exist."
             )
             return None
@@ -454,10 +522,9 @@
             name of the scenario to check if needs to be rerun for hazard
         """
         scenario = self.scenarios.get(scenario_name)
-        runner = ScenarioRunner(self, scenario=scenario)
 
         # Dont do anything if the hazard model has already been run in itself
-        if
+        if ScenarioRunner(self, scenario=scenario).hazard_run_check():
             return
 
         scenarios = [
@@ -476,16 +543,16 @@
                 path_new = self.scenarios.output_path.joinpath(
                     scenario.name, "Flooding"
                 )
-                _runner = ScenarioRunner(self, scenario=scn)
 
-                if
+                if ScenarioRunner(self, scenario=scn).hazard_run_check():
+                    # only copy results if the hazard model has actually finished and skip simulation folders
                     shutil.copytree(
                         existing,
                         path_new,
                         dirs_exist_ok=True,
                         ignore=shutil.ignore_patterns("simulations"),
                     )
-                    self.logger.info(
+                    logger.info(
                         f"Hazard simulation is used from the '{scn.name}' scenario"
                     )
 
@@ -509,9 +576,6 @@
             (self.scenarios.output_path / dir).resolve()
             for dir in os.listdir(self.scenarios.output_path)
         ]
-        self.logger.info(
-            f"Cleaning up scenario outputs: {len(output_scenarios)} scenarios found."
-        )
 
         def _call_garbage_collector(func, path, exc_info, retries=5, delay=0.1):
             """Retry deletion up to 5 times if the file is locked."""
@@ -528,15 +592,16 @@
 
                 print(f"Giving up on deleting {path} after {retries} attempts.")
 
-        for
+        for _dir in output_scenarios:
             # Delete if: input was deleted or corrupted output due to unfinished run
-            if
+            if _dir.name not in [
                 path.name for path in input_scenarios
-            ] or not finished_file_exists(
-
-
-
-
+            ] or not finished_file_exists(_dir):
+                logger.info(f"Cleaning up corrupted outputs of scenario: {_dir.name}.")
+                shutil.rmtree(_dir, onerror=_call_garbage_collector)
+            # If the scenario is finished, delete the simulation folders depending on `save_simulation`
+            elif finished_file_exists(_dir):
+                self._delete_simulations(_dir.name)
 
     def _delete_simulations(self, scenario_name: str) -> None:
         """Delete all simulation folders for a given scenario.
@@ -556,7 +621,6 @@
         if sub_events:
             for sub_event in sub_events:
                 overland._delete_simulation_folder(scn, sub_event=sub_event)
-
         else:
             overland._delete_simulation_folder(scn)
 
@@ -570,7 +634,7 @@
             )
             if sim_path.exists():
                 shutil.rmtree(sim_path, ignore_errors=True)
-                self.logger.info(f"Deleted simulation folder: {sim_path}")
+                logger.info(f"Deleted simulation folder: {sim_path}")
             if sim_path.parent.exists() and not any(
                 sim_path.parent.iterdir()
             ):
@@ -580,7 +644,7 @@
             sim_path = offshore._get_simulation_path_offshore(scn)
             if sim_path.exists():
                 shutil.rmtree(sim_path, ignore_errors=True)
-                self.logger.info(f"Deleted simulation folder: {sim_path}")
+                logger.info(f"Deleted simulation folder: {sim_path}")
 
             if sim_path.parent.exists() and not any(sim_path.parent.iterdir()):
                 # Remove the parent directory `simulations` if it is empty
flood_adapt/dbs_classes/dbs_static.py CHANGED
@@ -1,11 +1,13 @@
 from pathlib import Path
-from typing import Any, Callable, Tuple, Union
+from typing import Any, Callable, Optional, Tuple, Union
 
 import geopandas as gpd
 import pandas as pd
 from cht_cyclones.cyclone_track_database import CycloneTrackDatabase
 
 from flood_adapt.adapter.fiat_adapter import FiatAdapter
+from flood_adapt.adapter.interface.hazard_adapter import IHazardAdapter
+from flood_adapt.adapter.interface.impact_adapter import IImpactAdapter
 from flood_adapt.adapter.sfincs_adapter import SfincsAdapter
 from flood_adapt.config.config import Settings
 from flood_adapt.dbs_classes.interface.database import IDatabase
@@ -85,6 +87,7 @@ class DbsStatic(IDbsStatic):
     def get_model_boundary(self) -> gpd.GeoDataFrame:
         """Get the model boundary from the SFINCS model."""
         bnd = self.get_overland_sfincs_model().get_model_boundary()
+        bnd = bnd[["geometry"]]
         return bnd
 
     @cache_method_wrapper
@@ -100,19 +103,20 @@ class DbsStatic(IDbsStatic):
         return grid
 
     @cache_method_wrapper
-    def get_obs_points(self) -> gpd.GeoDataFrame:
+    def get_obs_points(self) -> Optional[gpd.GeoDataFrame]:
         """Get the observation points from the flood hazard model."""
+        if self._database.site.sfincs.obs_point is None:
+            return None
+
         names = []
         descriptions = []
         lat = []
         lon = []
-
-
-
-
-
-            lat.append(pt.lat)
-            lon.append(pt.lon)
+        for pt in self._database.site.sfincs.obs_point:
+            names.append(pt.name)
+            descriptions.append(pt.description)
+            lat.append(pt.lat)
+            lon.append(pt.lon)
 
         # create gpd.GeoDataFrame from obs_points in site file
         df = pd.DataFrame({"name": names, "description": descriptions})
@@ -223,6 +227,26 @@ class DbsStatic(IDbsStatic):
         """
         return self.get_fiat_model().get_property_types()
 
+    def get_hazard_models(self) -> list[IHazardAdapter]:
+        """Get the hazard models from the database.
+
+        Returns
+        -------
+        list[HazardAdapter]
+            List of hazard models
+        """
+        return [self.get_overland_sfincs_model()]
+
+    def get_impact_models(self) -> list[IImpactAdapter]:
+        """Get the impact models from the database.
+
+        Returns
+        -------
+        list[ImpactAdapter]
+            List of impact models
+        """
+        return [self.get_fiat_model()]
+
     def get_overland_sfincs_model(self) -> SfincsAdapter:
         """Get the template offshore SFINCS model."""
         overland_path = (
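These two accessors let callers iterate hazard and impact models generically instead of hard-coding SFINCS and FIAT. An illustrative loop (`db` is a Database instance and `scenario` a Scenario object; `has_run` and `run` are the adapter methods used by ScenarioRunner later in this diff):

    for hazard in db.static.get_hazard_models():  # currently [SfincsAdapter]
        if not hazard.has_run(scenario):
            hazard.run(scenario)

    for impact in db.static.get_impact_models():  # currently [FiatAdapter]
        if not impact.has_run(scenario):
            impact.run(scenario)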
flood_adapt/dbs_classes/interface/static.py CHANGED
@@ -6,6 +6,8 @@ import geopandas as gpd
 import pandas as pd
 from cht_cyclones.cyclone_track_database import CycloneTrackDatabase
 
+from flood_adapt.adapter.interface.hazard_adapter import IHazardAdapter
+from flood_adapt.adapter.interface.impact_adapter import IImpactAdapter
 from flood_adapt.adapter.sfincs_adapter import SfincsAdapter
 
 
@@ -37,6 +39,12 @@ class IDbsStatic(ABC):
     @abstractmethod
     def get_property_types(self) -> list: ...
 
+    @abstractmethod
+    def get_impact_models(self) -> list[IImpactAdapter]: ...
+
+    @abstractmethod
+    def get_hazard_models(self) -> list[IHazardAdapter]: ...
+
     @abstractmethod
     def get_overland_sfincs_model(self) -> SfincsAdapter: ...
 
flood_adapt/flood_adapt.py CHANGED
@@ -38,9 +38,10 @@ from flood_adapt.objects.projections.projections import Projection
 from flood_adapt.objects.scenarios.scenarios import Scenario
 from flood_adapt.objects.strategies.strategies import Strategy
 from flood_adapt.workflows.benefit_runner import BenefitRunner
-from flood_adapt.workflows.impacts_integrator import Impacts
 from flood_adapt.workflows.scenario_runner import ScenarioRunner
 
+logger = FloodAdaptLogging.getLogger()
+
 
 class FloodAdapt:
     database: Database
@@ -56,7 +57,6 @@
         self.database = Database(
             database_path=database_path.parent, database_name=database_path.name
         )
-        self.logger = FloodAdaptLogging.getLogger()
 
     # Measures
     def get_measures(self) -> dict[str, Any]:
@@ -718,8 +718,7 @@
 
         for scn in scenario_name:
             scenario = self.get_scenario(scn)
-            runner = ScenarioRunner(self.database, scenario=scenario)
-            runner.run()
+            ScenarioRunner(self.database, scenario=scenario).run()
 
     # Outputs
     def get_completed_scenarios(
@@ -858,7 +857,7 @@
         """
         return self.database.get_roads(name)
 
-    def get_obs_point_timeseries(self, name: str) -> gpd.GeoDataFrame:
+    def get_obs_point_timeseries(self, name: str) -> Optional[gpd.GeoDataFrame]:
         """Return the HTML strings of the water level timeseries for the given scenario.
 
         Parameters
@@ -868,27 +867,35 @@
 
         Returns
         -------
-
-
-
+        gdf : GeoDataFrame, optional
+            A GeoDataFrame with the observation points and their corresponding HTML paths for the timeseries.
+            Each row contains the station name and the path to the HTML file with the timeseries.
+            None if no observation points are found or if the scenario has not been run yet.
+        """
+        obs_points = self.database.static.get_obs_points()
+        if obs_points is None:
+            logger.info(
+                "No observation points found in the sfincs model and site configuration."
+            )
+            return None
+
         # Get the impacts objects from the scenario
         scenario = self.database.scenarios.get(name)
-        hazard = Impacts(scenario).hazard
 
         # Check if the scenario has run
-        if not
-
-            f"
+        if not ScenarioRunner(self.database, scenario=scenario).hazard_run_check():
+            logger.info(
+                f"Cannot retrieve observation point timeseries as the scenario {name} has not been run yet."
             )
+            return None
 
-        output_path = self.database.
-
-
-
-            for station in gdf.name
+        output_path = self.database.get_flooding_path(scenario.name)
+        obs_points["html"] = [
+            str(output_path.joinpath(f"{station}_timeseries.html"))
+            for station in obs_points.name
         ]
 
-        return
+        return obs_points
 
     def get_infographic(self, name: str) -> str:
         """Return the HTML string of the infographic for the given scenario.
@@ -1184,8 +1191,7 @@
         scenarios : pd.DataFrame
             A dataframe with the scenarios needed for this benefit assessment run.
         """
-        runner = BenefitRunner(self.database, benefit=benefit)
-        return runner.scenarios
+        return BenefitRunner(self.database, benefit=benefit).scenarios
 
     def create_benefit_scenarios(self, benefit: Benefit) -> None:
         """Create the benefit scenarios.
@@ -1195,8 +1201,7 @@
         benefit : Benefit
             The benefit object to create scenarios for.
         """
-        runner = BenefitRunner(self.database, benefit=benefit)
-        runner.create_benefit_scenarios()
+        BenefitRunner(self.database, benefit=benefit).create_benefit_scenarios()
 
     def run_benefit(self, name: Union[str, list[str]]) -> None:
         """Run the benefit assessment.
@@ -1210,8 +1215,7 @@
             benefit_name = [name]
         for name in benefit_name:
            benefit = self.database.benefits.get(name)
-            runner = BenefitRunner(self.database, benefit=benefit)
-            runner.run_cost_benefit()
+            BenefitRunner(self.database, benefit=benefit).run_cost_benefit()
 
     def get_aggregated_benefits(self, name: str) -> dict[str, gpd.GeoDataFrame]:
         """Get the aggregation benefits for a benefit assessment.
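Since get_obs_point_timeseries can now return None, callers should branch on it. A sketch, assuming FloodAdapt is constructed with the database path as its __init__ body above suggests (paths and names are illustrative):

    from pathlib import Path

    from flood_adapt.flood_adapt import FloodAdapt

    fa = FloodAdapt(Path("path/to/databases/my_site"))
    gdf = fa.get_obs_point_timeseries("my_scenario")
    if gdf is None:
        print("No obs points configured, or the hazard run has not finished.")
    else:
        print(gdf[["name", "html"]])  # station names and paths to the timeseries HTML files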
flood_adapt/objects/forcing/forcing.py CHANGED
@@ -1,15 +1,13 @@
-import logging
 import os
 from abc import ABC, abstractmethod
 from enum import Enum
 from pathlib import Path
-from typing import Any,
+from typing import Any, List, Type
 
 import tomli
 from pydantic import BaseModel, field_serializer
 
 from flood_adapt.config.hazard import RiverModel
-from flood_adapt.misc.log import FloodAdaptLogging
 
 
 ### ENUMS ###
@@ -55,7 +53,6 @@ class IForcing(BaseModel, ABC):
 
     type: ForcingType
    source: ForcingSource
-    logger: ClassVar[logging.Logger] = FloodAdaptLogging.getLogger("Forcing")
 
    @classmethod
    def load_file(cls, path: Path):
flood_adapt/objects/forcing/tide_gauge.py CHANGED
@@ -12,6 +12,8 @@ from flood_adapt.objects.forcing import unit_system as us
 from flood_adapt.objects.forcing.time_frame import TimeFrame
 from flood_adapt.objects.forcing.timeseries import CSVTimeseries
 
+logger = FloodAdaptLogging.getLogger("TideGauge")
+
 
 class TideGaugeSource(str, Enum):
     """The accepted input for the variable source in tide_gauge."""
@@ -61,7 +63,6 @@ class TideGauge(BaseModel):
     ) # units of the water levels in the downloaded file
 
     _cached_data: ClassVar[dict[str, pd.DataFrame]] = {}
-    logger: ClassVar = FloodAdaptLogging.getLogger("TideGauge")
 
     @model_validator(mode="after")
     def validate_selection_type(self) -> "TideGauge":
@@ -100,16 +101,14 @@ class TideGauge(BaseModel):
         pd.DataFrame
             Dataframe with time as index and the waterlevel for each observation station as columns.
         """
-
+        logger.info(f"Retrieving waterlevels for tide gauge {self.ID} for {time}")
         if self.file:
             gauge_data = self._read_imported_waterlevels(time=time, path=self.file)
         else:
             gauge_data = self._download_tide_gauge_data(time=time)
 
         if gauge_data is None:
-            self.logger.warning(
-                f"Could not retrieve waterlevels for tide gauge {self.ID}"
-            )
+            logger.warning(f"Could not retrieve waterlevels for tide gauge {self.ID}")
             return pd.DataFrame()
 
         gauge_data.columns = [f"waterlevel_{self.ID}"]
@@ -159,7 +158,7 @@
         """
         cache_key = f"{self.ID}_{time.start_time}_{time.end_time}"
         if cache_key in self.__class__._cached_data:
-            self.logger.info("Tide gauge data retrieved from cache")
+            logger.info("Tide gauge data retrieved from cache")
             return self.__class__._cached_data[cache_key]
 
         try:
@@ -180,7 +179,7 @@
             df = pd.DataFrame(data=series, index=index)
 
         except COOPSAPIError as e:
-            self.logger.error(
+            logger.error(
                 f"Could not download tide gauge data for station {self.ID}. {e}"
             )
             return None
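The cache lookup above keys a ClassVar dictionary on station ID plus time frame, so all TideGauge instances share downloaded data and repeat requests for the same window skip the COOPS call. A standalone sketch of that pattern (simplified names, not the package's API):

    from typing import ClassVar

    import pandas as pd
    from pydantic import BaseModel

    class CachedGauge(BaseModel):
        ID: str
        _cached_data: ClassVar[dict[str, pd.DataFrame]] = {}

        def fetch(self, start: str, end: str) -> pd.DataFrame:
            key = f"{self.ID}_{start}_{end}"
            if key in self.__class__._cached_data:
                return self.__class__._cached_data[key]
            df = pd.DataFrame()  # stand-in for the real download
            self.__class__._cached_data[key] = df
            return df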
flood_adapt/objects/forcing/timeseries.py CHANGED
@@ -535,7 +535,6 @@ class TimeseriesFactory:
            raise ValueError("SCS configuration not found in database.")
        scs_file_name = scs_file_name or scs_config.file
        scs_type = scs_type or scs_config.type
-
        return ScsTimeseries(
            duration=duration,
            peak_time=peak_time,
flood_adapt/objects/output/floodmap.py ADDED
@@ -0,0 +1,13 @@
+from pathlib import Path
+
+from pydantic import BaseModel
+
+from flood_adapt.config.impacts import FloodmapType
+from flood_adapt.objects.events.events import Mode
+
+
+class FloodMap(BaseModel):
+    name: str
+    map_type: FloodmapType
+    mode: Mode
+    paths: list[Path]
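This small pydantic model is what the new Database.get_floodmap (earlier in this diff) returns, and it arrives alongside the removal of flood_adapt/workflows/floodmap.py. Constructing one directly, with an illustrative path:

    from pathlib import Path

    from flood_adapt.config.impacts import FloodmapType
    from flood_adapt.objects.events.events import Mode
    from flood_adapt.objects.output.floodmap import FloodMap

    fm = FloodMap(
        name="my_scenario",
        map_type=FloodmapType.water_level,
        mode=Mode.single_event,
        paths=[Path("output/my_scenario/Flooding/max_water_level_map.nc")],
    )

Being a BaseModel, it validates the field types on construction.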
flood_adapt/workflows/scenario_runner.py CHANGED
@@ -3,7 +3,8 @@ from flood_adapt.dbs_classes.interface.database import IDatabase
 from flood_adapt.misc.log import FloodAdaptLogging
 from flood_adapt.misc.utils import finished_file_exists, write_finished_file
 from flood_adapt.objects.scenarios.scenarios import Scenario
-
+
+logger = FloodAdaptLogging.getLogger("ScenarioRunner")
 
 
 class ScenarioRunner:
@@ -11,12 +12,21 @@
 
     def __init__(self, database: IDatabase, scenario: Scenario) -> None:
         """Create a Scenario object."""
-        self.logger = FloodAdaptLogging.getLogger("ScenarioRunner")
         self._database = database
-        self.
+        self._scenario = scenario
         self.site_info = self._database.site
         self.results_path = self._database.scenarios.output_path / self._scenario.name
 
+    @property
+    def impact_models(self):
+        """Return the list of impact models."""
+        return self._database.static.get_impact_models()
+
+    @property
+    def hazard_models(self):
+        """Return the list of hazard models."""
+        return self._database.static.get_hazard_models()
+
     def _load_objects(self, scenario: Scenario) -> None:
         """Load objects from the database."""
         self._scenario = scenario
@@ -24,12 +34,7 @@
         self._projection = self._database.projections.get(scenario.projection)
         self._strategy = self._database.strategies.get(scenario.strategy)
 
-
-    def impacts(self) -> Impacts:
-        return Impacts(
-            scenario=self._scenario,
-        )
-
+    ### General methods ###
     def run(self) -> None:
         """Run hazard and impact models for the scenario."""
         self._database.has_run_hazard(self._scenario.name)
@@ -39,28 +44,11 @@
         # Initiate the logger for all the integrator scripts.
         log_file = self.results_path.joinpath(f"logfile_{self._scenario.name}.log")
         with FloodAdaptLogging.to_file(file_path=log_file):
-
-
-
-
-
-            ]
-            for hazard in hazard_models:
-                if not hazard.run_completed(self._scenario):
-                    hazard.run(self._scenario)
-                else:
-                    self.logger.info(
-                        f"Hazard for scenario '{self._scenario.name}' has already been run."
-                    )
-
-            if not self.impacts.has_run:
-                self.impacts.run()
-            else:
-                self.logger.info(
-                    f"Impacts for scenario `{self._scenario.name}` has already been run."
-                )
-
-            self.logger.info(f"Finished evaluation of `{self._scenario.name}`")
+            logger.info(f"FloodAdapt version `{__version__}`")
+            logger.info(f"Started evaluation of `{self._scenario.name}`")
+            self._run_hazards()
+            self._run_impacts()
+            logger.info(f"Finished evaluation of `{self._scenario.name}`")
 
         # write finished file to indicate that the scenario has been run
         write_finished_file(self.results_path)
@@ -68,3 +56,41 @@
     def has_run_check(self):
         """Check if the scenario has been run."""
         return finished_file_exists(self.results_path)
+
+    ### Hazard methods ###
+    def _run_hazards(self) -> None:
+        """Run the hazard model for the scenario."""
+        if self.hazard_run_check():
+            logger.info(f"Hazards for {self._scenario.name} have already been run.")
+            return
+        for model in self.hazard_models:
+            model.run(self._scenario)
+
+    def hazard_run_check(self) -> bool:
+        """Check if the impact has been run.
+
+        Returns
+        -------
+        bool
+            _description_
+        """
+        return all(model.has_run(self._scenario) for model in self.hazard_models)
+
+    ### Impact methods ###
+    def _run_impacts(self) -> None:
+        """Run the impact model(s)."""
+        if self.impacts_run_check():
+            logger.info(f"Impacts for {self._scenario.name} have already been run.")
+            return
+        for model in self.impact_models:
+            model.run(self._scenario)
+
+    def impacts_run_check(self) -> bool:
+        """Check if the impact has been run.
+
+        Returns
+        -------
+        bool
+            _description_
+        """
+        return all(model.has_run(self._scenario) for model in self.impact_models)
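Taken together, run() now delegates to _run_hazards() and _run_impacts(), which consult the model lists exposed by DbsStatic rather than hard-coding SFINCS and FIAT. A sketch of driving the runner directly (`database` and `scenario` objects assumed to exist):

    from flood_adapt.workflows.scenario_runner import ScenarioRunner

    runner = ScenarioRunner(database, scenario=scenario)
    if not runner.has_run_check():
        runner.run()  # hazards, then impacts, then the finished-file marker

    # The granular checks are also public:
    print(runner.hazard_run_check(), runner.impacts_run_check())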