flood-adapt 0.3.9__py3-none-any.whl → 0.3.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flood_adapt/__init__.py +26 -22
- flood_adapt/adapter/__init__.py +9 -9
- flood_adapt/adapter/fiat_adapter.py +1541 -1541
- flood_adapt/adapter/interface/hazard_adapter.py +70 -70
- flood_adapt/adapter/interface/impact_adapter.py +36 -36
- flood_adapt/adapter/interface/model_adapter.py +89 -89
- flood_adapt/adapter/interface/offshore.py +19 -19
- flood_adapt/adapter/sfincs_adapter.py +1853 -1848
- flood_adapt/adapter/sfincs_offshore.py +187 -193
- flood_adapt/config/config.py +248 -248
- flood_adapt/config/fiat.py +219 -219
- flood_adapt/config/gui.py +331 -331
- flood_adapt/config/sfincs.py +481 -336
- flood_adapt/config/site.py +129 -129
- flood_adapt/database_builder/database_builder.py +2210 -2210
- flood_adapt/database_builder/templates/default_units/imperial.toml +9 -9
- flood_adapt/database_builder/templates/default_units/metric.toml +9 -9
- flood_adapt/database_builder/templates/green_infra_table/green_infra_lookup_table.csv +10 -10
- flood_adapt/database_builder/templates/infographics/OSM/config_charts.toml +90 -90
- flood_adapt/database_builder/templates/infographics/OSM/config_people.toml +57 -57
- flood_adapt/database_builder/templates/infographics/OSM/config_risk_charts.toml +121 -121
- flood_adapt/database_builder/templates/infographics/OSM/config_roads.toml +65 -65
- flood_adapt/database_builder/templates/infographics/OSM/styles.css +45 -45
- flood_adapt/database_builder/templates/infographics/US_NSI/config_charts.toml +126 -126
- flood_adapt/database_builder/templates/infographics/US_NSI/config_people.toml +60 -60
- flood_adapt/database_builder/templates/infographics/US_NSI/config_risk_charts.toml +121 -121
- flood_adapt/database_builder/templates/infographics/US_NSI/config_roads.toml +65 -65
- flood_adapt/database_builder/templates/infographics/US_NSI/styles.css +45 -45
- flood_adapt/database_builder/templates/infometrics/OSM/metrics_additional_risk_configs.toml +4 -4
- flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config.toml +143 -143
- flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config_risk.toml +153 -153
- flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config.toml +127 -127
- flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config_risk.toml +57 -57
- flood_adapt/database_builder/templates/infometrics/US_NSI/metrics_additional_risk_configs.toml +4 -4
- flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config.toml +191 -191
- flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config_risk.toml +153 -153
- flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config.toml +178 -178
- flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config_risk.toml +57 -57
- flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config.toml +9 -9
- flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config_risk.toml +65 -65
- flood_adapt/database_builder/templates/output_layers/bin_colors.toml +5 -5
- flood_adapt/database_builder.py +16 -16
- flood_adapt/dbs_classes/__init__.py +21 -21
- flood_adapt/dbs_classes/database.py +533 -684
- flood_adapt/dbs_classes/dbs_benefit.py +77 -76
- flood_adapt/dbs_classes/dbs_event.py +61 -59
- flood_adapt/dbs_classes/dbs_measure.py +112 -111
- flood_adapt/dbs_classes/dbs_projection.py +34 -34
- flood_adapt/dbs_classes/dbs_scenario.py +137 -137
- flood_adapt/dbs_classes/dbs_static.py +274 -273
- flood_adapt/dbs_classes/dbs_strategy.py +130 -129
- flood_adapt/dbs_classes/dbs_template.py +279 -278
- flood_adapt/dbs_classes/interface/database.py +107 -139
- flood_adapt/dbs_classes/interface/element.py +121 -121
- flood_adapt/dbs_classes/interface/static.py +47 -47
- flood_adapt/flood_adapt.py +1229 -1178
- flood_adapt/misc/database_user.py +16 -16
- flood_adapt/misc/exceptions.py +22 -0
- flood_adapt/misc/log.py +183 -183
- flood_adapt/misc/path_builder.py +54 -54
- flood_adapt/misc/utils.py +185 -185
- flood_adapt/objects/__init__.py +82 -82
- flood_adapt/objects/benefits/benefits.py +61 -61
- flood_adapt/objects/events/event_factory.py +135 -135
- flood_adapt/objects/events/event_set.py +88 -84
- flood_adapt/objects/events/events.py +236 -234
- flood_adapt/objects/events/historical.py +58 -58
- flood_adapt/objects/events/hurricane.py +68 -67
- flood_adapt/objects/events/synthetic.py +46 -50
- flood_adapt/objects/forcing/__init__.py +92 -92
- flood_adapt/objects/forcing/csv.py +68 -68
- flood_adapt/objects/forcing/discharge.py +66 -66
- flood_adapt/objects/forcing/forcing.py +150 -150
- flood_adapt/objects/forcing/forcing_factory.py +182 -182
- flood_adapt/objects/forcing/meteo_handler.py +93 -93
- flood_adapt/objects/forcing/netcdf.py +40 -40
- flood_adapt/objects/forcing/plotting.py +453 -429
- flood_adapt/objects/forcing/rainfall.py +98 -98
- flood_adapt/objects/forcing/tide_gauge.py +191 -191
- flood_adapt/objects/forcing/time_frame.py +90 -90
- flood_adapt/objects/forcing/timeseries.py +564 -564
- flood_adapt/objects/forcing/unit_system.py +580 -580
- flood_adapt/objects/forcing/waterlevels.py +108 -108
- flood_adapt/objects/forcing/wind.py +124 -124
- flood_adapt/objects/measures/measure_factory.py +92 -92
- flood_adapt/objects/measures/measures.py +551 -529
- flood_adapt/objects/object_model.py +74 -68
- flood_adapt/objects/projections/projections.py +103 -103
- flood_adapt/objects/scenarios/scenarios.py +22 -22
- flood_adapt/objects/strategies/strategies.py +89 -89
- flood_adapt/workflows/benefit_runner.py +579 -554
- flood_adapt/workflows/floodmap.py +85 -85
- flood_adapt/workflows/impacts_integrator.py +85 -85
- flood_adapt/workflows/scenario_runner.py +70 -70
- {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/LICENSE +674 -674
- {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/METADATA +867 -865
- flood_adapt-0.3.11.dist-info/RECORD +140 -0
- flood_adapt-0.3.9.dist-info/RECORD +0 -139
- {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/WHEEL +0 -0
- {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/top_level.txt +0 -0
|
@@ -1,684 +1,533 @@
|
|
|
1
|
-
import gc
|
|
2
|
-
import os
|
|
3
|
-
import shutil
|
|
4
|
-
import time
|
|
5
|
-
from pathlib import Path
|
|
6
|
-
from typing import Any, Literal, Optional, Union
|
|
7
|
-
|
|
8
|
-
import geopandas as gpd
|
|
9
|
-
import numpy as np
|
|
10
|
-
import pandas as pd
|
|
11
|
-
import xarray as xr
|
|
12
|
-
from
|
|
13
|
-
|
|
14
|
-
from
|
|
15
|
-
from
|
|
16
|
-
|
|
17
|
-
from flood_adapt.
|
|
18
|
-
from flood_adapt.dbs_classes.
|
|
19
|
-
from flood_adapt.dbs_classes.
|
|
20
|
-
from flood_adapt.dbs_classes.
|
|
21
|
-
from flood_adapt.dbs_classes.
|
|
22
|
-
from flood_adapt.dbs_classes.
|
|
23
|
-
from flood_adapt.dbs_classes.
|
|
24
|
-
from flood_adapt.
|
|
25
|
-
from flood_adapt.
|
|
26
|
-
from flood_adapt.misc.
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
from flood_adapt.
|
|
32
|
-
from flood_adapt.
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
def
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
)
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
self.
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
self.
|
|
112
|
-
self.
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
self.
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
self.
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
self.
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
)
|
|
128
|
-
self.
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
)
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
self.
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
"""
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
""
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
def
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
if
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
)
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
return gdfs
|
|
535
|
-
|
|
536
|
-
def get_aggregation_benefits(
|
|
537
|
-
self, benefit_name: str
|
|
538
|
-
) -> dict[str, gpd.GeoDataFrame]:
|
|
539
|
-
"""Get a dictionary with the aggregated benefits as geodataframes.
|
|
540
|
-
|
|
541
|
-
Parameters
|
|
542
|
-
----------
|
|
543
|
-
benefit_name : str
|
|
544
|
-
name of the benefit analysis
|
|
545
|
-
|
|
546
|
-
Returns
|
|
547
|
-
-------
|
|
548
|
-
dict[GeoDataFrame]
|
|
549
|
-
dictionary with aggregated benefits per aggregation type
|
|
550
|
-
"""
|
|
551
|
-
out_path = self.benefits.output_path.joinpath(
|
|
552
|
-
benefit_name,
|
|
553
|
-
)
|
|
554
|
-
gdfs = {}
|
|
555
|
-
for aggr_area in out_path.glob("benefits_*.gpkg"):
|
|
556
|
-
label = aggr_area.stem.split("benefits_")[-1]
|
|
557
|
-
gdfs[label] = gpd.read_file(aggr_area, engine="pyogrio")
|
|
558
|
-
gdfs[label] = gdfs[label].to_crs(4326)
|
|
559
|
-
return gdfs
|
|
560
|
-
|
|
561
|
-
def get_object_list(
|
|
562
|
-
self,
|
|
563
|
-
object_type: Literal[
|
|
564
|
-
"projections", "events", "measures", "strategies", "scenarios", "benefits"
|
|
565
|
-
],
|
|
566
|
-
) -> dict[str, Any]:
|
|
567
|
-
"""Get a dictionary with all the toml paths and last modification dates that exist in the database that correspond to object_type.
|
|
568
|
-
|
|
569
|
-
Parameters
|
|
570
|
-
----------
|
|
571
|
-
object_type : str
|
|
572
|
-
Can be 'projections', 'events', 'measures', 'strategies' or 'scenarios'
|
|
573
|
-
|
|
574
|
-
Returns
|
|
575
|
-
-------
|
|
576
|
-
dict[str, Any]
|
|
577
|
-
Includes 'path' and 'last_modification_date' info
|
|
578
|
-
"""
|
|
579
|
-
match object_type:
|
|
580
|
-
case "projections":
|
|
581
|
-
return self.projections.summarize_objects()
|
|
582
|
-
case "events":
|
|
583
|
-
return self.events.summarize_objects()
|
|
584
|
-
case "measures":
|
|
585
|
-
return self.measures.summarize_objects()
|
|
586
|
-
case "strategies":
|
|
587
|
-
return self.strategies.summarize_objects()
|
|
588
|
-
case "scenarios":
|
|
589
|
-
return self.scenarios.summarize_objects()
|
|
590
|
-
case "benefits":
|
|
591
|
-
return self.benefits.summarize_objects()
|
|
592
|
-
case _:
|
|
593
|
-
raise ValueError(
|
|
594
|
-
f"Object type '{object_type}' is not valid. Must be one of 'projections', 'events', 'measures', 'strategies' or 'scenarios'."
|
|
595
|
-
)
|
|
596
|
-
|
|
597
|
-
def has_run_hazard(self, scenario_name: str) -> None:
|
|
598
|
-
"""Check if there is already a simulation that has the exact same hazard component.
|
|
599
|
-
|
|
600
|
-
If yes that is copied to avoid running the hazard model twice.
|
|
601
|
-
|
|
602
|
-
Parameters
|
|
603
|
-
----------
|
|
604
|
-
scenario_name : str
|
|
605
|
-
name of the scenario to check if needs to be rerun for hazard
|
|
606
|
-
"""
|
|
607
|
-
scenario = self.scenarios.get(scenario_name)
|
|
608
|
-
runner = ScenarioRunner(self, scenario=scenario)
|
|
609
|
-
|
|
610
|
-
# Dont do anything if the hazard model has already been run in itself
|
|
611
|
-
if runner.impacts.hazard.has_run:
|
|
612
|
-
return
|
|
613
|
-
|
|
614
|
-
scenarios = [
|
|
615
|
-
self.scenarios.get(scn)
|
|
616
|
-
for scn in self.scenarios.summarize_objects()["name"]
|
|
617
|
-
]
|
|
618
|
-
scns_simulated = [
|
|
619
|
-
sim
|
|
620
|
-
for sim in scenarios
|
|
621
|
-
if self.scenarios.output_path.joinpath(sim.name, "Flooding").is_dir()
|
|
622
|
-
]
|
|
623
|
-
|
|
624
|
-
for scn in scns_simulated:
|
|
625
|
-
if self.scenarios.equal_hazard_components(scn, scenario):
|
|
626
|
-
existing = self.scenarios.output_path.joinpath(scn.name, "Flooding")
|
|
627
|
-
path_new = self.scenarios.output_path.joinpath(
|
|
628
|
-
scenario.name, "Flooding"
|
|
629
|
-
)
|
|
630
|
-
_runner = ScenarioRunner(self, scenario=scn)
|
|
631
|
-
|
|
632
|
-
if _runner.impacts.hazard.has_run: # only copy results if the hazard model has actually finished and skip simulation folders
|
|
633
|
-
shutil.copytree(
|
|
634
|
-
existing,
|
|
635
|
-
path_new,
|
|
636
|
-
dirs_exist_ok=True,
|
|
637
|
-
ignore=shutil.ignore_patterns("simulations"),
|
|
638
|
-
)
|
|
639
|
-
self.logger.info(
|
|
640
|
-
f"Hazard simulation is used from the '{scn.name}' scenario"
|
|
641
|
-
)
|
|
642
|
-
|
|
643
|
-
def cleanup(self) -> None:
|
|
644
|
-
"""
|
|
645
|
-
Remove corrupted scenario output.
|
|
646
|
-
|
|
647
|
-
This method removes any scenario output that:
|
|
648
|
-
- is corrupted due to unfinished runs
|
|
649
|
-
- does not have a corresponding input
|
|
650
|
-
|
|
651
|
-
"""
|
|
652
|
-
if not self.scenarios.output_path.is_dir():
|
|
653
|
-
return
|
|
654
|
-
|
|
655
|
-
input_scenarios = [
|
|
656
|
-
(self.scenarios.input_path / dir).resolve()
|
|
657
|
-
for dir in os.listdir(self.scenarios.input_path)
|
|
658
|
-
]
|
|
659
|
-
output_scenarios = [
|
|
660
|
-
(self.scenarios.output_path / dir).resolve()
|
|
661
|
-
for dir in os.listdir(self.scenarios.output_path)
|
|
662
|
-
]
|
|
663
|
-
|
|
664
|
-
def _call_garbage_collector(func, path, exc_info, retries=5, delay=0.1):
|
|
665
|
-
"""Retry deletion up to 5 times if the file is locked."""
|
|
666
|
-
for attempt in range(retries):
|
|
667
|
-
gc.collect()
|
|
668
|
-
time.sleep(delay)
|
|
669
|
-
try:
|
|
670
|
-
func(path) # Retry deletion
|
|
671
|
-
return # Exit if successful
|
|
672
|
-
except Exception as e:
|
|
673
|
-
print(
|
|
674
|
-
f"Attempt {attempt + 1}/{retries} failed to delete {path}: {e}"
|
|
675
|
-
)
|
|
676
|
-
|
|
677
|
-
print(f"Giving up on deleting {path} after {retries} attempts.")
|
|
678
|
-
|
|
679
|
-
for dir in output_scenarios:
|
|
680
|
-
# Delete if: input was deleted or corrupted output due to unfinished run
|
|
681
|
-
if dir.name not in [
|
|
682
|
-
path.name for path in input_scenarios
|
|
683
|
-
] or not finished_file_exists(dir):
|
|
684
|
-
shutil.rmtree(dir, onerror=_call_garbage_collector)
|
|
1
|
+
import gc
|
|
2
|
+
import os
|
|
3
|
+
import shutil
|
|
4
|
+
import time
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any, Literal, Optional, Union
|
|
7
|
+
|
|
8
|
+
import geopandas as gpd
|
|
9
|
+
import numpy as np
|
|
10
|
+
import pandas as pd
|
|
11
|
+
import xarray as xr
|
|
12
|
+
from geopandas import GeoDataFrame
|
|
13
|
+
|
|
14
|
+
from flood_adapt.config.sfincs import SlrScenariosModel
|
|
15
|
+
from flood_adapt.config.site import Site
|
|
16
|
+
from flood_adapt.dbs_classes.dbs_benefit import DbsBenefit
|
|
17
|
+
from flood_adapt.dbs_classes.dbs_event import DbsEvent
|
|
18
|
+
from flood_adapt.dbs_classes.dbs_measure import DbsMeasure
|
|
19
|
+
from flood_adapt.dbs_classes.dbs_projection import DbsProjection
|
|
20
|
+
from flood_adapt.dbs_classes.dbs_scenario import DbsScenario
|
|
21
|
+
from flood_adapt.dbs_classes.dbs_static import DbsStatic
|
|
22
|
+
from flood_adapt.dbs_classes.dbs_strategy import DbsStrategy
|
|
23
|
+
from flood_adapt.dbs_classes.interface.database import IDatabase
|
|
24
|
+
from flood_adapt.misc.exceptions import DatabaseError
|
|
25
|
+
from flood_adapt.misc.log import FloodAdaptLogging
|
|
26
|
+
from flood_adapt.misc.path_builder import (
|
|
27
|
+
TopLevelDir,
|
|
28
|
+
db_path,
|
|
29
|
+
)
|
|
30
|
+
from flood_adapt.misc.utils import finished_file_exists
|
|
31
|
+
from flood_adapt.objects.forcing import unit_system as us
|
|
32
|
+
from flood_adapt.workflows.scenario_runner import ScenarioRunner
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class Database(IDatabase):
|
|
36
|
+
"""Implementation of IDatabase class that holds the site information and has methods to get static data info, and all the input information.
|
|
37
|
+
|
|
38
|
+
Additionally it can manipulate (add, edit, copy and delete) any of the objects in the input.
|
|
39
|
+
"""
|
|
40
|
+
|
|
41
|
+
_instance = None
|
|
42
|
+
|
|
43
|
+
database_path: Union[str, os.PathLike]
|
|
44
|
+
database_name: str
|
|
45
|
+
_init_done: bool = False
|
|
46
|
+
|
|
47
|
+
base_path: Path
|
|
48
|
+
input_path: Path
|
|
49
|
+
static_path: Path
|
|
50
|
+
output_path: Path
|
|
51
|
+
|
|
52
|
+
_site: Site
|
|
53
|
+
|
|
54
|
+
_events: DbsEvent
|
|
55
|
+
_scenarios: DbsScenario
|
|
56
|
+
_strategies: DbsStrategy
|
|
57
|
+
_measures: DbsMeasure
|
|
58
|
+
_projections: DbsProjection
|
|
59
|
+
_benefits: DbsBenefit
|
|
60
|
+
|
|
61
|
+
_static: DbsStatic
|
|
62
|
+
|
|
63
|
+
def __new__(cls, *args, **kwargs):
|
|
64
|
+
if not cls._instance: # Singleton pattern
|
|
65
|
+
cls._instance = super(Database, cls).__new__(cls)
|
|
66
|
+
return cls._instance
|
|
67
|
+
|
|
68
|
+
def __init__(
|
|
69
|
+
self,
|
|
70
|
+
database_path: Union[str, os.PathLike, None] = None,
|
|
71
|
+
database_name: Optional[str] = None,
|
|
72
|
+
) -> None:
|
|
73
|
+
"""
|
|
74
|
+
Initialize the DatabaseController object.
|
|
75
|
+
|
|
76
|
+
Parameters
|
|
77
|
+
----------
|
|
78
|
+
database_path : Union[str, os.PathLike]
|
|
79
|
+
The path to the database root
|
|
80
|
+
database_name : str
|
|
81
|
+
The name of the database.
|
|
82
|
+
-----
|
|
83
|
+
"""
|
|
84
|
+
if database_path is None or database_name is None:
|
|
85
|
+
if not self._init_done:
|
|
86
|
+
raise DatabaseError(
|
|
87
|
+
"""Database path and name must be provided for the first initialization.
|
|
88
|
+
To do this, run `flood_adapt.api.static.read_database(database_path, site_name)` first."""
|
|
89
|
+
)
|
|
90
|
+
else:
|
|
91
|
+
return # Skip re-initialization
|
|
92
|
+
|
|
93
|
+
if (
|
|
94
|
+
self._init_done
|
|
95
|
+
and self.database_path == database_path
|
|
96
|
+
and self.database_name == database_name
|
|
97
|
+
):
|
|
98
|
+
return # Skip re-initialization
|
|
99
|
+
|
|
100
|
+
# If the database is not initialized, or a new path or name is provided, (re-)initialize
|
|
101
|
+
re_option = "re-" if self._init_done else ""
|
|
102
|
+
self.logger = FloodAdaptLogging.getLogger("Database")
|
|
103
|
+
self.logger.info(
|
|
104
|
+
f"{re_option}initializing database to {database_name} at {database_path}".capitalize()
|
|
105
|
+
)
|
|
106
|
+
self.database_path = database_path
|
|
107
|
+
self.database_name = database_name
|
|
108
|
+
|
|
109
|
+
# Set the paths
|
|
110
|
+
|
|
111
|
+
self.base_path = Path(database_path) / database_name
|
|
112
|
+
self.input_path = db_path(TopLevelDir.input)
|
|
113
|
+
self.static_path = db_path(TopLevelDir.static)
|
|
114
|
+
self.output_path = db_path(TopLevelDir.output)
|
|
115
|
+
|
|
116
|
+
self._site = Site.load_file(self.static_path / "config" / "site.toml")
|
|
117
|
+
|
|
118
|
+
# Initialize the different database objects
|
|
119
|
+
self._static = DbsStatic(self)
|
|
120
|
+
self._events = DbsEvent(
|
|
121
|
+
self, standard_objects=self.site.standard_objects.events
|
|
122
|
+
)
|
|
123
|
+
self._scenarios = DbsScenario(self)
|
|
124
|
+
self._strategies = DbsStrategy(
|
|
125
|
+
self, standard_objects=self.site.standard_objects.strategies
|
|
126
|
+
)
|
|
127
|
+
self._measures = DbsMeasure(self)
|
|
128
|
+
self._projections = DbsProjection(
|
|
129
|
+
self, standard_objects=self.site.standard_objects.projections
|
|
130
|
+
)
|
|
131
|
+
self._benefits = DbsBenefit(self)
|
|
132
|
+
|
|
133
|
+
# Delete any unfinished/crashed scenario output
|
|
134
|
+
self.cleanup()
|
|
135
|
+
|
|
136
|
+
self._init_done = True
|
|
137
|
+
|
|
138
|
+
def shutdown(self):
|
|
139
|
+
"""Explicitly shut down the singleton and clear all references."""
|
|
140
|
+
import gc
|
|
141
|
+
|
|
142
|
+
self._instance = None
|
|
143
|
+
self._init_done = False
|
|
144
|
+
|
|
145
|
+
self.__class__._instance = None
|
|
146
|
+
self.__dict__.clear()
|
|
147
|
+
gc.collect()
|
|
148
|
+
|
|
149
|
+
# Property methods
|
|
150
|
+
@property
|
|
151
|
+
def site(self) -> Site:
|
|
152
|
+
return self._site
|
|
153
|
+
|
|
154
|
+
@property
|
|
155
|
+
def static(self) -> DbsStatic:
|
|
156
|
+
return self._static
|
|
157
|
+
|
|
158
|
+
@property
|
|
159
|
+
def events(self) -> DbsEvent:
|
|
160
|
+
return self._events
|
|
161
|
+
|
|
162
|
+
@property
|
|
163
|
+
def scenarios(self) -> DbsScenario:
|
|
164
|
+
return self._scenarios
|
|
165
|
+
|
|
166
|
+
@property
|
|
167
|
+
def strategies(self) -> DbsStrategy:
|
|
168
|
+
return self._strategies
|
|
169
|
+
|
|
170
|
+
@property
|
|
171
|
+
def measures(self) -> DbsMeasure:
|
|
172
|
+
return self._measures
|
|
173
|
+
|
|
174
|
+
@property
|
|
175
|
+
def projections(self) -> DbsProjection:
|
|
176
|
+
return self._projections
|
|
177
|
+
|
|
178
|
+
@property
|
|
179
|
+
def benefits(self) -> DbsBenefit:
|
|
180
|
+
return self._benefits
|
|
181
|
+
|
|
182
|
+
def get_slr_scenarios(self) -> SlrScenariosModel:
|
|
183
|
+
"""Get the path to the SLR scenarios file.
|
|
184
|
+
|
|
185
|
+
Returns
|
|
186
|
+
-------
|
|
187
|
+
SlrScenariosModel
|
|
188
|
+
SLR scenarios configuration model with the file path set to the static path.
|
|
189
|
+
"""
|
|
190
|
+
if self.site.sfincs.slr_scenarios is None:
|
|
191
|
+
raise DatabaseError("No SLR scenarios defined in the site configuration.")
|
|
192
|
+
slr = self.site.sfincs.slr_scenarios
|
|
193
|
+
slr.file = str(self.static_path / slr.file)
|
|
194
|
+
return slr
|
|
195
|
+
|
|
196
|
+
def get_outputs(self) -> dict[str, Any]:
|
|
197
|
+
"""Return a dictionary with info on the outputs that currently exist in the database.
|
|
198
|
+
|
|
199
|
+
Returns
|
|
200
|
+
-------
|
|
201
|
+
dict[str, Any]
|
|
202
|
+
Includes 'name', 'path', 'last_modification_date' and "finished" info
|
|
203
|
+
"""
|
|
204
|
+
all_scenarios = pd.DataFrame(self._scenarios.summarize_objects())
|
|
205
|
+
if len(all_scenarios) > 0:
|
|
206
|
+
df = all_scenarios[all_scenarios["finished"]]
|
|
207
|
+
else:
|
|
208
|
+
df = all_scenarios
|
|
209
|
+
finished = df.drop(columns="finished").reset_index(drop=True)
|
|
210
|
+
return finished.to_dict()
|
|
211
|
+
|
|
212
|
+
def get_topobathy_path(self) -> str:
|
|
213
|
+
"""Return the path of the topobathy tiles in order to create flood maps with water level maps.
|
|
214
|
+
|
|
215
|
+
Returns
|
|
216
|
+
-------
|
|
217
|
+
str
|
|
218
|
+
path to topobathy tiles
|
|
219
|
+
"""
|
|
220
|
+
path = self.input_path.parent.joinpath("static", "dem", "tiles", "topobathy")
|
|
221
|
+
return str(path)
|
|
222
|
+
|
|
223
|
+
def get_index_path(self) -> str:
|
|
224
|
+
"""Return the path of the index tiles which are used to connect each water level cell with the topobathy tiles.
|
|
225
|
+
|
|
226
|
+
Returns
|
|
227
|
+
-------
|
|
228
|
+
str
|
|
229
|
+
path to index tiles
|
|
230
|
+
"""
|
|
231
|
+
path = self.input_path.parent.joinpath("static", "dem", "tiles", "indices")
|
|
232
|
+
return str(path)
|
|
233
|
+
|
|
234
|
+
def get_depth_conversion(self) -> float:
|
|
235
|
+
"""Return the flood depth conversion that is need in the gui to plot the flood map.
|
|
236
|
+
|
|
237
|
+
Returns
|
|
238
|
+
-------
|
|
239
|
+
float
|
|
240
|
+
conversion factor
|
|
241
|
+
"""
|
|
242
|
+
# Get conresion factor need to get from the sfincs units to the gui units
|
|
243
|
+
units = us.UnitfulLength(
|
|
244
|
+
value=1, units=self.site.gui.units.default_length_units
|
|
245
|
+
)
|
|
246
|
+
unit_cor = units.convert(new_units=us.UnitTypesLength.meters)
|
|
247
|
+
|
|
248
|
+
return unit_cor
|
|
249
|
+
|
|
250
|
+
def get_max_water_level(
|
|
251
|
+
self,
|
|
252
|
+
scenario_name: str,
|
|
253
|
+
return_period: Optional[int] = None,
|
|
254
|
+
) -> np.ndarray:
|
|
255
|
+
"""Return an array with the maximum water levels during an event.
|
|
256
|
+
|
|
257
|
+
Parameters
|
|
258
|
+
----------
|
|
259
|
+
scenario_name : str
|
|
260
|
+
name of scenario
|
|
261
|
+
return_period : int, optional
|
|
262
|
+
return period in years, by default None
|
|
263
|
+
|
|
264
|
+
Returns
|
|
265
|
+
-------
|
|
266
|
+
np.array
|
|
267
|
+
2D map of maximum water levels
|
|
268
|
+
"""
|
|
269
|
+
# If single event read with hydromt-sfincs
|
|
270
|
+
if not return_period:
|
|
271
|
+
map_path = self.scenarios.output_path.joinpath(
|
|
272
|
+
scenario_name,
|
|
273
|
+
"Flooding",
|
|
274
|
+
"max_water_level_map.nc",
|
|
275
|
+
)
|
|
276
|
+
with xr.open_dataarray(map_path) as map:
|
|
277
|
+
zsmax = map.to_numpy()
|
|
278
|
+
else:
|
|
279
|
+
file_path = self.scenarios.output_path.joinpath(
|
|
280
|
+
scenario_name,
|
|
281
|
+
"Flooding",
|
|
282
|
+
f"RP_{return_period:04d}_maps.nc",
|
|
283
|
+
)
|
|
284
|
+
with xr.open_dataset(file_path) as ds:
|
|
285
|
+
zsmax = ds["risk_map"][:, :].to_numpy().T
|
|
286
|
+
return zsmax
|
|
287
|
+
|
|
288
|
+
def get_flood_map_geotiff(
|
|
289
|
+
self,
|
|
290
|
+
scenario_name: str,
|
|
291
|
+
return_period: Optional[int] = None,
|
|
292
|
+
) -> Optional[Path]:
|
|
293
|
+
"""Return the path to the geotiff file with the flood map for the given scenario.
|
|
294
|
+
|
|
295
|
+
Parameters
|
|
296
|
+
----------
|
|
297
|
+
scenario_name : str
|
|
298
|
+
name of scenario
|
|
299
|
+
return_period : int, optional
|
|
300
|
+
return period in years, by default None. Only for risk scenarios.
|
|
301
|
+
|
|
302
|
+
Returns
|
|
303
|
+
-------
|
|
304
|
+
Optional[Path]
|
|
305
|
+
path to the flood map geotiff file, or None if it does not exist
|
|
306
|
+
"""
|
|
307
|
+
if not return_period:
|
|
308
|
+
file_path = self.scenarios.output_path.joinpath(
|
|
309
|
+
scenario_name,
|
|
310
|
+
"Flooding",
|
|
311
|
+
f"FloodMap_{scenario_name}.tif",
|
|
312
|
+
)
|
|
313
|
+
else:
|
|
314
|
+
file_path = self.scenarios.output_path.joinpath(
|
|
315
|
+
scenario_name,
|
|
316
|
+
"Flooding",
|
|
317
|
+
f"RP_{return_period:04d}_maps.tif",
|
|
318
|
+
)
|
|
319
|
+
if not file_path.is_file():
|
|
320
|
+
self.logger.warning(
|
|
321
|
+
f"Flood map for scenario '{scenario_name}' at {file_path} does not exist."
|
|
322
|
+
)
|
|
323
|
+
return None
|
|
324
|
+
return file_path
|
|
325
|
+
|
|
326
|
+
def get_building_footprints(self, scenario_name: str) -> GeoDataFrame:
|
|
327
|
+
"""Return a geodataframe of the impacts at the footprint level.
|
|
328
|
+
|
|
329
|
+
Parameters
|
|
330
|
+
----------
|
|
331
|
+
scenario_name : str
|
|
332
|
+
name of scenario
|
|
333
|
+
|
|
334
|
+
Returns
|
|
335
|
+
-------
|
|
336
|
+
GeoDataFrame
|
|
337
|
+
impacts at footprint level
|
|
338
|
+
"""
|
|
339
|
+
out_path = self.scenarios.output_path.joinpath(scenario_name, "Impacts")
|
|
340
|
+
footprints = out_path / f"Impacts_building_footprints_{scenario_name}.gpkg"
|
|
341
|
+
gdf = gpd.read_file(footprints, engine="pyogrio")
|
|
342
|
+
gdf = gdf.to_crs(4326)
|
|
343
|
+
return gdf
|
|
344
|
+
|
|
345
|
+
def get_roads(self, scenario_name: str) -> GeoDataFrame:
    """Read the road impact results of a scenario.

    Parameters
    ----------
    scenario_name : str
        name of scenario

    Returns
    -------
    GeoDataFrame
        Impacts at roads, reprojected to EPSG:4326
    """
    impacts_dir = self.scenarios.output_path.joinpath(scenario_name, "Impacts")
    roads_file = impacts_dir / f"Impacts_roads_{scenario_name}.gpkg"
    # pyogrio engine for fast vector I/O; serve results in WGS84 for the GUI
    roads_gdf = gpd.read_file(roads_file, engine="pyogrio")
    return roads_gdf.to_crs(4326)
|
|
363
|
+
|
|
364
|
+
def get_aggregation(self, scenario_name: str) -> dict[str, gpd.GeoDataFrame]:
    """Collect the aggregated impact results of a scenario.

    Parameters
    ----------
    scenario_name : str
        name of the scenario

    Returns
    -------
    dict[str, gpd.GeoDataFrame]
        dictionary with aggregated damages per aggregation type,
        keyed by the aggregation label, reprojected to EPSG:4326
    """
    impacts_dir = self.scenarios.output_path.joinpath(scenario_name, "Impacts")
    aggregated: dict[str, gpd.GeoDataFrame] = {}
    # One file per aggregation area; the label is the suffix after the scenario name
    for aggr_file in impacts_dir.glob(f"Impacts_aggregated_{scenario_name}_*.gpkg"):
        label = aggr_file.stem.split(f"{scenario_name}_")[-1]
        aggregated[label] = gpd.read_file(aggr_file, engine="pyogrio").to_crs(4326)
    return aggregated
|
|
384
|
+
|
|
385
|
+
def get_aggregation_benefits(
    self, benefit_name: str
) -> dict[str, gpd.GeoDataFrame]:
    """Collect the aggregated benefit results of a benefit analysis.

    Parameters
    ----------
    benefit_name : str
        name of the benefit analysis

    Returns
    -------
    dict[str, gpd.GeoDataFrame]
        dictionary with aggregated benefits per aggregation type,
        keyed by the aggregation label, reprojected to EPSG:4326
    """
    benefits_dir = self.benefits.output_path.joinpath(
        benefit_name,
    )
    aggregated: dict[str, gpd.GeoDataFrame] = {}
    # One file per aggregation area; the label is the suffix after "benefits_"
    for benefit_file in benefits_dir.glob("benefits_*.gpkg"):
        label = benefit_file.stem.split("benefits_")[-1]
        aggregated[label] = gpd.read_file(benefit_file, engine="pyogrio").to_crs(4326)
    return aggregated
|
|
409
|
+
|
|
410
|
+
def get_object_list(
|
|
411
|
+
self,
|
|
412
|
+
object_type: Literal[
|
|
413
|
+
"projections", "events", "measures", "strategies", "scenarios", "benefits"
|
|
414
|
+
],
|
|
415
|
+
) -> dict[str, Any]:
|
|
416
|
+
"""Get a dictionary with all the toml paths and last modification dates that exist in the database that correspond to object_type.
|
|
417
|
+
|
|
418
|
+
Parameters
|
|
419
|
+
----------
|
|
420
|
+
object_type : str
|
|
421
|
+
Can be 'projections', 'events', 'measures', 'strategies' or 'scenarios'
|
|
422
|
+
|
|
423
|
+
Returns
|
|
424
|
+
-------
|
|
425
|
+
dict[str, Any]
|
|
426
|
+
Includes 'path' and 'last_modification_date' info
|
|
427
|
+
"""
|
|
428
|
+
match object_type:
|
|
429
|
+
case "projections":
|
|
430
|
+
return self.projections.summarize_objects()
|
|
431
|
+
case "events":
|
|
432
|
+
return self.events.summarize_objects()
|
|
433
|
+
case "measures":
|
|
434
|
+
return self.measures.summarize_objects()
|
|
435
|
+
case "strategies":
|
|
436
|
+
return self.strategies.summarize_objects()
|
|
437
|
+
case "scenarios":
|
|
438
|
+
return self.scenarios.summarize_objects()
|
|
439
|
+
case "benefits":
|
|
440
|
+
return self.benefits.summarize_objects()
|
|
441
|
+
case _:
|
|
442
|
+
raise DatabaseError(
|
|
443
|
+
f"Object type '{object_type}' is not valid. Must be one of 'projections', 'events', 'measures', 'strategies' or 'scenarios'."
|
|
444
|
+
)
|
|
445
|
+
|
|
446
|
+
def has_run_hazard(self, scenario_name: str) -> None:
    """Check if there is already a simulation that has the exact same hazard component.

    If yes that is copied to avoid running the hazard model twice.

    Parameters
    ----------
    scenario_name : str
        name of the scenario to check if needs to be rerun for hazard
    """
    scenario = self.scenarios.get(scenario_name)
    runner = ScenarioRunner(self, scenario=scenario)

    # Dont do anything if the hazard model has already been run in itself
    if runner.impacts.hazard.has_run:
        return

    scenarios = [
        self.scenarios.get(scn)
        for scn in self.scenarios.summarize_objects()["name"]
    ]
    # Candidates: scenarios that have at least started a hazard simulation
    # (a "Flooding" output folder exists).
    scns_simulated = [
        sim
        for sim in scenarios
        if self.scenarios.output_path.joinpath(sim.name, "Flooding").is_dir()
    ]

    for scn in scns_simulated:
        if not self.scenarios.equal_hazard_components(scn, scenario):
            continue

        _runner = ScenarioRunner(self, scenario=scn)
        # only copy results if the hazard model has actually finished and skip simulation folders
        if not _runner.impacts.hazard.has_run:
            continue

        existing = self.scenarios.output_path.joinpath(scn.name, "Flooding")
        path_new = self.scenarios.output_path.joinpath(
            scenario.name, "Flooding"
        )
        shutil.copytree(
            existing,
            path_new,
            dirs_exist_ok=True,
            ignore=shutil.ignore_patterns("simulations"),
        )
        self.logger.info(
            f"Hazard simulation is used from the '{scn.name}' scenario"
        )
        # Bug fix: stop after the first finished match instead of copying
        # identical hazard results again from every other matching scenario.
        return
|
|
491
|
+
|
|
492
|
+
def cleanup(self) -> None:
    """
    Remove corrupted scenario output.

    This method removes any scenario output that:
    - is corrupted due to unfinished runs
    - does not have a corresponding input

    """
    if not self.scenarios.output_path.is_dir():
        return

    # Build the set of valid scenario names once (was rebuilt per output
    # directory before, an accidental O(n^2) membership test). Also use
    # pathlib iteration instead of os.listdir + join, consistent with the
    # rest of the file.
    input_names = {
        path.resolve().name for path in self.scenarios.input_path.iterdir()
    }
    output_scenarios = [
        path.resolve() for path in self.scenarios.output_path.iterdir()
    ]

    def _call_garbage_collector(func, path, exc_info, retries=5, delay=0.1):
        """Retry deletion up to 5 times if the file is locked.

        Signature matches the shutil.rmtree ``onerror`` callback
        (``exc_info`` is required by that contract but unused here).
        A GC pass is forced before each retry to release lingering file
        handles (e.g. on Windows).
        """
        for attempt in range(retries):
            gc.collect()
            time.sleep(delay)
            try:
                func(path)  # Retry deletion
                return  # Exit if successful
            except Exception as e:
                print(
                    f"Attempt {attempt + 1}/{retries} failed to delete {path}: {e}"
                )

        print(f"Giving up on deleting {path} after {retries} attempts.")

    # Renamed loop variable (was `dir`, shadowing the builtin).
    for out_dir in output_scenarios:
        # Delete if: input was deleted or corrupted output due to unfinished run
        if out_dir.name not in input_names or not finished_file_exists(out_dir):
            shutil.rmtree(out_dir, onerror=_call_garbage_collector)
|