wolfhece 2.1.99__py3-none-any.whl → 2.1.101__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. wolfhece/PyDraw.py +220 -29
  2. wolfhece/PyGui.py +1039 -53
  3. wolfhece/PyVertexvectors.py +2 -2
  4. wolfhece/Results2DGPU.py +37 -13
  5. wolfhece/acceptability/Parallels.py +2 -2
  6. wolfhece/acceptability/_add_path.py +23 -0
  7. wolfhece/acceptability/acceptability.py +594 -563
  8. wolfhece/acceptability/acceptability_gui.py +564 -331
  9. wolfhece/acceptability/cli.py +307 -120
  10. wolfhece/acceptability/func.py +1754 -1597
  11. wolfhece/apps/version.py +1 -1
  12. wolfhece/bernoulli/losses.py +76 -23
  13. wolfhece/bernoulli/losses_jax.py +143 -0
  14. wolfhece/bernoulli/pipe.py +7 -2
  15. wolfhece/gpuview.py +4 -1
  16. wolfhece/libs/__init__.py +11 -10
  17. wolfhece/libs/wolfogl.cp310-win_amd64.pyd +0 -0
  18. wolfhece/math_parser/__init__.py +4 -4
  19. wolfhece/math_parser/calculator.py +51 -9
  20. wolfhece/mesh2d/bc_manager.py +25 -2
  21. wolfhece/mesh2d/gpu_2d.py +644 -0
  22. wolfhece/mesh2d/simple_2d.py +2817 -0
  23. wolfhece/mesh2d/wolf2dprev.py +5 -2
  24. wolfhece/pidcontroller.py +131 -0
  25. wolfhece/pywalous.py +7 -7
  26. wolfhece/scenario/config_manager.py +98 -21
  27. wolfhece/wolf_array.py +391 -176
  28. wolfhece/wolf_vrt.py +108 -7
  29. wolfhece/wolfresults_2D.py +113 -6
  30. wolfhece/xyz_file.py +91 -51
  31. {wolfhece-2.1.99.dist-info → wolfhece-2.1.101.dist-info}/METADATA +3 -1
  32. {wolfhece-2.1.99.dist-info → wolfhece-2.1.101.dist-info}/RECORD +35 -30
  33. {wolfhece-2.1.99.dist-info → wolfhece-2.1.101.dist-info}/WHEEL +1 -1
  34. {wolfhece-2.1.99.dist-info → wolfhece-2.1.101.dist-info}/entry_points.txt +0 -0
  35. {wolfhece-2.1.99.dist-info → wolfhece-2.1.101.dist-info}/top_level.txt +0 -0
@@ -1,564 +1,595 @@
1
- """
2
- Author: University of Liege, HECE, LEMA
3
- Date: 2024
4
-
5
- Copyright (c) 2024 University of Liege. All rights reserved.
6
-
7
- This script and its content are protected by copyright law. Unauthorized
8
- copying or distribution of this file, via any medium, is strictly prohibited.
9
- """
10
-
11
- from .Parallels import parallel_gpd_clip, parallel_v2r, parallel_datamod
12
- from .func import data_modification, compute_vulnerability, compute_vulnerability4scenario
13
- from .func import match_vulnerability2sim, compute_acceptability, shp_to_raster, clip_layer
14
- from .func import Accept_Manager, cleaning_directory, EXTENT, Vulnerability_csv, compute_code
15
-
16
- import pandas as pd
17
- import os
18
- from osgeo import gdal
19
- import fiona
20
- import glob
21
- import numpy as np
22
- import geopandas as gpd
23
- from pathlib import Path
24
- import logging
25
- from tqdm import tqdm
26
- from enum import Enum
27
- from pyogrio import read_dataframe
28
-
29
- class steps_base_data_creation(Enum):
30
- """
31
- Enum for the steps in the base data creation
32
- """
33
- CLIP_GDB = 1
34
- CLIP_CADASTER = 2
35
- CLIP_PICC = 3
36
- POINTS2POLYS = 4
37
- RASTERIZE_IGN = 5
38
- PREPROCESS_VULNCODE = 6
39
- DATABASE_TO_RASTER = 7
40
-
41
- @classmethod
42
- def get_list_names(cls):
43
- return [f'{cur.name} - {cur.value}' for cur in cls]
44
-
45
- class steps_vulnerability(Enum):
46
- """
47
- Enum for the steps in the vulnerability computation
48
- """
49
- CREATE_RASTERS = 1
50
- CREATE_RASTERS_VULN = 10
51
- CREATE_RASTERS_CODE = 11
52
- APPLY_MODIFS = 2
53
- MATCH_SIMUL = 3
54
-
55
- @classmethod
56
- def get_list_names(cls):
57
- return [f'{cur.name} - {cur.value}' for cur in cls]
58
-
59
- class steps_acceptability(Enum):
60
- """
61
- Enum for the steps in the acceptability computation
62
- """
63
- COMPUTE_LOCAL_ACCEPT = 1
64
- LOAD_FROM_FILES = 2
65
- COMPUTE_MEAN_ACCEPT = 3
66
-
67
- @classmethod
68
- def get_list_names(cls):
69
- return [f'{cur.name} - {cur.value}' for cur in cls]
70
-
71
- def Base_data_creation(main_dir:str = 'Data',
72
- Original_gdb:str = 'GT_Resilence_dataRisques202010.gdb',
73
- Study_area:str = 'Bassin_Vesdre.shp',
74
- CaPa_Walloon:str = 'Cadastre_Walloon.gpkg',
75
- PICC_Walloon:str = 'PICC_vDIFF.gdb',
76
- CE_IGN_top10v:str = 'CE_IGN_TOP10V/CE_IGN_TOP10V.shp',
77
- resolution:float = 1.,
78
- number_procs:int = 8,
79
- steps:list[int] | list[steps_base_data_creation] = [1,2,3,4,5,6,7],
80
- Vuln_csv:str = 'Vulnerability.csv'):
81
- """
82
- Create the databse.
83
-
84
- In this step, the following operations are performed:
85
- - Clip the original gdb file to the study area
86
- - Clip the Cadastre Walloon file to the study area
87
- - Clip the PICC Walloon file to the study area
88
- - Clip and Rasterize the IGN top10v file
89
- - Create the study area database with the vulnerability levels
90
-
91
-
92
- :param main_dir: The main data directory
93
- :param Original_gdb: The original gdb file from SPW - GT Resilience
94
- :param Study_area: The study area shapefile -- Data will be clipped to this area
95
- :param CaPa_Walloon: The Cadastre Walloon file -- Shapfeile from SPW
96
- :param PICC_Walloon: The PICC Walloon file -- Shapefile from SPW
97
- :param CE_IGN_top10v: The CE "Cours d'eau" IGN top10v file -- Shapefile from IGN with river layer
98
- :param resolution: The output resolution of the raster files
99
- :param number_procs: The number of processors to use for parallel processing
100
-
101
- """
102
- LAYER_CABU = "CaBu"
103
- LAYER_CAPA = "CaPa"
104
- LAYER_BATIEMPRISE = "CONSTR_BATIEMPRISE"
105
-
106
- manager = Accept_Manager(main_dir,
107
- Study_area,
108
- Original_gdb=Original_gdb,
109
- CaPa_Walloon=CaPa_Walloon,
110
- PICC_Walloon=PICC_Walloon,
111
- CE_IGN_top10v=CE_IGN_top10v,
112
- Vuln_csv=Vuln_csv)
113
-
114
- if not manager.check_before_database_creation():
115
- logging.error("The necessary files are missing - Verify logs for more information")
116
- return
117
-
118
- done = []
119
-
120
- if 1 in steps or 6 in steps or steps_base_data_creation.PREPROCESS_VULNCODE in steps or steps_base_data_creation.CLIP_GDB in steps:
121
- # Load the vulnerability CSV to get the layers
122
- vulnerability_csv = Vulnerability_csv(manager.VULNERABILITY_CSV)
123
-
124
- if 1 in steps or steps_base_data_creation.CLIP_GDB in steps:
125
- # Clean the directory to avoid any conflict
126
- # GPKG driver does not overwrite the existing file but adds new layers
127
- cleaning_directory(manager.TMP_CLIPGDB)
128
-
129
- # ********************************************************************************************************************
130
- # Step 1, Clip Original GDB
131
-
132
- # Clip the GDB file and store it in output directory : manager.TMP_CLIPGDB
133
- parallel_gpd_clip(vulnerability_csv.get_layers(), manager.ORIGINAL_GDB, manager.SA, manager.TMP_CLIPGDB, number_procs)
134
-
135
- done.append(steps_base_data_creation.CLIP_GDB)
136
-
137
- if 2 in steps or steps_base_data_creation.CLIP_CADASTER in steps:
138
- # ********************************************************************************************************************
139
- # Step 2, Clip Cadaster data
140
- cleaning_directory(manager.TMP_CADASTER)
141
-
142
- # Only 2 layers are present in the Cadastre Walloon file
143
- # Clip the Cadastre Walloon file and store it in output directory : manager.TMP_CADASTER
144
- parallel_gpd_clip([LAYER_CABU, LAYER_CAPA], manager.CAPA_WALLOON, manager.SA, manager.TMP_CADASTER, min(2, number_procs))
145
-
146
- done.append(steps_base_data_creation.CLIP_CADASTER)
147
-
148
- if 3 in steps or steps_base_data_creation.CLIP_PICC in steps:
149
- # ********************************************************************************************************************
150
- # Step 3, Clip PICC data
151
- cleaning_directory(manager.TMP_PICC)
152
-
153
- # ONly 1 layer is needed from the PICC Walloon file
154
- # Clip the PICC Walloon file and store it in output dir : manager.TMP_PICC
155
- parallel_gpd_clip([LAYER_BATIEMPRISE], manager.PICC_WALLOON, manager.SA, manager.TMP_PICC, min(1, number_procs))
156
-
157
- done.append(steps_base_data_creation.CLIP_PICC)
158
-
159
- if 4 in steps or steps_base_data_creation.POINTS2POLYS in steps:
160
- # ********************************************************************************************************************
161
- # Step 4, create database based on changes in report
162
-
163
- cleaning_directory(manager.TMP_WMODIF)
164
-
165
- # PreLoad Picc and CaPa from clipped files
166
- Picc:gpd.GeoDataFrame = read_dataframe(str(manager.TMP_PICC / (LAYER_BATIEMPRISE+EXTENT)), layer=LAYER_BATIEMPRISE)
167
- CaPa:gpd.GeoDataFrame = read_dataframe(str(manager.TMP_CADASTER / (LAYER_CAPA+EXTENT)), layer=LAYER_CAPA)
168
-
169
- assert Picc.crs == CaPa.crs, "The crs of the two shapefiles are different"
170
-
171
- parallel_datamod(manager=manager, picc=Picc, capa=CaPa, number_procs=number_procs)
172
-
173
- done.append(steps_base_data_creation.POINTS2POLYS)
174
-
175
- if 5 in steps or steps_base_data_creation.RASTERIZE_IGN in steps:
176
- # ********************************************************************************************************************
177
- # Step 5 : Rasaterize the IGN data "Course d'eau" to get the riverbed mask
178
- LAYER_IGN = "CE_IGN_TOP10V"
179
- clip_layer(layer=LAYER_IGN, file_path=manager.CE_IGN_TOP10V, Study_Area=manager.SA, output_dir=manager.TMP_IGNCE)
180
- shp_to_raster(manager.TMP_IGNCE / (LAYER_IGN + '.gpkg'), manager.SA_MASKED_RIVER, resolution, manager=manager)
181
-
182
- done.append(steps_base_data_creation.RASTERIZE_IGN)
183
-
184
- if 6 in steps or steps_base_data_creation.PREPROCESS_VULNCODE in steps:
185
- # ********************************************************************************************************************
186
- # Step 6 : Pre-processing for Vulnerability
187
- # Save the database with vulnerability levels and codes
188
- # This database will be rasterized in 'Database_to_raster'
189
-
190
- layers_sa = manager.get_layers_in_wmodif()
191
- layers_csv = vulnerability_csv.get_layers()
192
-
193
- # Search difference between the two lists of layers
194
- list_shp = list(set(layers_csv).difference(layers_sa))
195
-
196
- logging.info("Excluded layers due to no features in shapefiles:")
197
- logging.info(list_shp)
198
-
199
- not_in_csv = [curlayer for curlayer in layers_sa if curlayer not in layers_csv]
200
- if len(not_in_csv) > 0:
201
- logging.error("Not treated layers due to no vulnerability level or code:")
202
- logging.error(not_in_csv)
203
-
204
- logging.info("STEP1: Saving the database for Vulnerability with attributes Vulne and Code")
205
-
206
- for curlayer in layers_sa:
207
- logging.info(curlayer)
208
-
209
- in_file = str(manager.TMP_WMODIF / (curlayer+EXTENT))
210
- out_file = str(manager.TMP_CODEVULNE / (curlayer+EXTENT))
211
-
212
- shp:gpd.GeoDataFrame = gpd.read_file(in_file)
213
-
214
- nb_lines, _ = shp.shape
215
- if nb_lines > 0:
216
- shp["Path"] = curlayer
217
- shp["Vulne"] = vulnerability_csv.get_vulnerability_level(curlayer)
218
- shp["Code"] = vulnerability_csv.get_vulnerability_code(curlayer)
219
- shp = shp[["geometry", "Path", "Vulne","Code"]]
220
- shp.to_file(out_file)
221
- else:
222
- # Normally, Phase 1 should have removed the empty shapefiles
223
- # But, we never know... ;-)
224
- logging.warning(f"Empty shapefile {curlayer} in {in_file}")
225
-
226
- done.append(steps_base_data_creation.PREPROCESS_VULNCODE)
227
-
228
- if 7 in steps or steps_base_data_creation.DATABASE_TO_RASTER in steps:
229
- # Rasterize the database
230
- cleaning_directory(manager.TMP_RASTERS)
231
- cleaning_directory(manager.TMP_RASTERS_CODE)
232
- cleaning_directory(manager.TMP_RASTERS_VULNE)
233
-
234
- Database_to_raster(main_dir,
235
- Study_area,
236
- resolution,
237
- number_procs=number_procs,
238
- Vuln_csv=Vuln_csv)
239
-
240
- done.append(steps_base_data_creation.DATABASE_TO_RASTER)
241
-
242
- return done
243
-
244
- def Database_to_raster(main_dir:str = 'Data',
245
- Study_area:str = 'Bassin_Vesdre.shp',
246
- resolution:float = 1.,
247
- number_procs:int = 16,
248
- Vuln_csv:str = 'Vulnerability.csv'):
249
- """
250
- Convert the vector database to raster database based on their vulnerability values
251
-
252
- Each leyer is converted to a raster file with the vulnerability values
253
- and the code values.
254
-
255
- They are stored in the TEMP/DATABASES/*StudyArea*/VULNERABILITY/RASTERS in:
256
- - Code
257
- - Vulne
258
-
259
- :param main_dir: The main data directory
260
- :param Study_area: The study area shapefile
261
- :param resolution: The resolution of the output raster files - default is 1 meter
262
- :param number_procs: The number of processors to use for parallel processing
263
-
264
- The parallel processing is safe as each layer is processed independently.
265
- """
266
-
267
- manager = Accept_Manager(main_dir, Study_area, Vuln_csv=Vuln_csv)
268
-
269
- resolution = float(resolution)
270
-
271
- if not manager.check_before_rasterize():
272
- logging.error("The necessary files are missing - Verify logs for more information")
273
- return
274
-
275
- logging.info("Convert vectors to raster based on their vulnerability values")
276
-
277
- attributes = ["Vulne", "Code"]
278
- for cur_attrib in attributes:
279
- parallel_v2r(manager, cur_attrib, resolution, number_procs, convert_to_sparse=True)
280
-
281
- def Vulnerability(main_dir:str = 'Data',
282
- scenario:str = 'Scenario1',
283
- Study_area:str = 'Bassin_Vesdre.shp',
284
- resolution:float = 1.,
285
- steps:list[int] | list[steps_vulnerability] = [1,10,11,2,3],
286
- Vuln_csv:str = 'Vulnerability.csv',
287
- Intermediate_csv:str = 'Intermediate.csv'):
288
- """
289
- Compute the vulnerability for the study area and the scenario, if needed.
290
-
291
- The vulnerability is computed in 3 steps:
292
- 1. Compute the vulnerability for the study area
293
- 2. Compute the vulnerability for the scenario
294
- 3. Clip the vulnerability rasters to the simulation area
295
-
296
- During step 3, three matrices are computed and clipped to the simulation area:
297
- - Vulnerability
298
- - Code
299
- - Masked River
300
-
301
- :param main_dir: The main data directory
302
- :param scenario: The scenario name
303
- :param Study_area: The study area shapefile
304
- :param resolution: The resolution of the output raster files - default is 1 meter
305
- :param steps: The steps to compute the vulnerability - default is [1,2,3]
306
-
307
- To be more rapid, the steps can be computed separately.
308
- - [1,2,3] : All steps are computed - Necessary for the first time
309
- - [2,3] : Only the scenario and clipping steps are computed -- Useful for scenario changes
310
- - [3] : Only the clipping step is computed -- Useful if simulation area changes but scenario is the same
311
-
312
- """
313
-
314
- manager = Accept_Manager(main_dir,
315
- Study_area,
316
- scenario=scenario,
317
- Vuln_csv=Vuln_csv,
318
- Intermediate_csv=Intermediate_csv)
319
-
320
- if not manager.check_before_vulnerability():
321
- logging.error("The necessary files are missing - Verify logs for more information")
322
- return
323
-
324
- logging.info("Starting VULNERABILITY computations at {} m resolution".format(resolution))
325
-
326
- done = []
327
-
328
- if 1 in steps or steps_vulnerability.CREATE_RASTERS in steps:
329
- # Step 1 : Compute the vulnerability rasters for the study area
330
- # The data **will not** be impacted by the scenario modifications
331
-
332
- logging.info("Generate Vulnerability rasters {}m".format(resolution))
333
-
334
- cleaning_directory(manager.TMP_SCEN_DIR)
335
-
336
- if 10 in steps or steps_vulnerability.CREATE_RASTERS_VULN in steps:
337
- compute_vulnerability(manager)
338
- done.append(steps_vulnerability.CREATE_RASTERS_VULN)
339
-
340
- if 11 in steps or steps_vulnerability.CREATE_RASTERS_CODE in steps:
341
- compute_code(manager)
342
- done.append(steps_vulnerability.CREATE_RASTERS_CODE)
343
-
344
- done.append(steps_vulnerability.CREATE_RASTERS)
345
-
346
- if 2 in steps or steps_vulnerability.APPLY_MODIFS in steps:
347
- # Step 2 : Compute the vulnerability rasters for the scenario
348
- # The data **will be** impacted by the scenario modifications
349
-
350
- if not manager.check_vuln_code_sa():
351
- logging.error("The vulnerability and code files for the study area are missing")
352
- logging.warning("Force the computation even if not prescribed in the steps")
353
-
354
- Vulnerability(main_dir, scenario, Study_area, resolution, [1])
355
-
356
- bu:list[Path] = manager.get_files_in_rm_buildings()
357
-
358
- if len(bu)>0:
359
- for curfile in bu:
360
- tiff_file = manager.TMP_RM_BUILD_DIR / (curfile.stem + ".tiff")
361
- shp_to_raster(curfile, tiff_file)
362
-
363
- compute_vulnerability4scenario(manager)
364
- else:
365
- logging.warning(f"No buildings were removed in water depth analysis OR No shapefiles in {manager.IN_RM_BUILD_DIR}")
366
-
367
- done.append(steps_vulnerability.APPLY_MODIFS)
368
-
369
- if 3 in steps or steps_vulnerability.MATCH_SIMUL in steps:
370
- # Step 3 : Clip the vulnerability/code rasters to the **simulation area**
371
-
372
- logging.info("Save Vulnerability files for the area of interest")
373
-
374
- return_periods = manager.get_return_periods()
375
- TMAX = manager.get_filepath_for_return_period(return_periods[-1])
376
-
377
- if TMAX is None:
378
- logging.error("The file for the maximum return period is missing")
379
- return
380
-
381
- match_vulnerability2sim(manager.SA_MASKED_RIVER,manager.OUT_MASKED_RIVER, TMAX)
382
- match_vulnerability2sim(manager.SA_VULN, manager.OUT_VULN, TMAX)
383
- match_vulnerability2sim(manager.SA_CODE, manager.OUT_CODE, TMAX)
384
-
385
- done.append(steps_vulnerability.MATCH_SIMUL)
386
-
387
- return done
388
-
389
- def Acceptability(main_dir:str = 'Vesdre',
390
- scenario:str = 'Scenario1',
391
- Study_area:str = 'Bassin_Vesdre.shp',
392
- coeff_auto:bool = True,
393
- Ponderation_csv:str = 'Ponderation.csv',
394
- resample_size:int = 100,
395
- steps:list[int] | list[steps_acceptability] = [1,2,3]):
396
- """ Compute acceptability for the scenario """
397
-
398
- done = []
399
-
400
- manager = Accept_Manager(main_dir,
401
- Study_area,
402
- scenario=scenario,
403
- Ponderation_csv=Ponderation_csv)
404
-
405
- # Load the vulnerability raster **for the scenario**
406
- vulne = gdal.Open(str(manager.OUT_VULN))
407
-
408
- # Load the river mask
409
- riv = gdal.Open(str(manager.OUT_MASKED_RIVER))
410
-
411
- # Get the geotransform and projection for the output tiff
412
- geotrans = riv.GetGeoTransform()
413
- proj = riv.GetProjection()
414
-
415
- assert vulne.GetGeoTransform() == riv.GetGeoTransform(), "The geotransform of the two rasters is different"
416
- assert vulne.GetProjection() == riv.GetProjection(), "The projection of the two rasters is different"
417
-
418
- # Convert to numpy array
419
- vulne = vulne.GetRasterBand(1).ReadAsArray()
420
- riv = riv.GetRasterBand(1).ReadAsArray()
421
-
422
- # Get the return periods available
423
- return_periods = manager.get_return_periods()
424
-
425
- # Prepare the river bed filter
426
- # Useful as we iterate over the return periods
427
- # and the river bed is the same for all return periods
428
- ij_riv = np.argwhere(riv == 1)
429
-
430
- # Initialize the dictionary to store the acceptability values
431
- part_accept = {}
432
-
433
- if 1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps:
434
- # Compute acceptability for each return period
435
- for curT in tqdm(return_periods):
436
-
437
- # Load the **FILLED** modelled water depth for the return period
438
- model_h = gdal.Open(str(manager.get_sim_file_for_return_period(curT)))
439
- # Convert to numpy array
440
- model_h = model_h.GetRasterBand(1).ReadAsArray()
441
-
442
- assert model_h.shape == vulne.shape, "The shape of the modelled water depth is different from the vulnerability raster"
443
-
444
- # Set 0. if the water depth is 0.
445
- model_h[model_h == 0] = 0
446
- # Set 0. in the river bed
447
- model_h[ij_riv[:,0], ij_riv[:,1]] = 0
448
-
449
- assert model_h[ij_riv[0][0], ij_riv[0][1]] == 0, "The river bed is not set to 0 in the modelled water depth"
450
- assert model_h.max() > 0, "The maximum water depth is 0"
451
- if model_h.min() < 0:
452
- logging.warning("The minimum water depth is negative - {} cells".format(np.count_nonzero(model_h<0)))
453
- logging.warning("Setting the negative values to 0")
454
- model_h[model_h < 0] = 0
455
-
456
- logging.info("Return period {}".format(curT))
457
-
458
- # Compute the local acceptability for the return period
459
- part_accept[curT] = compute_acceptability(manager, model_h, vulne, curT, (geotrans, proj))
460
-
461
- done.append(steps_acceptability.COMPUTE_LOCAL_ACCEPT)
462
-
463
- # At this point, the local acceptability for each return period is computed
464
- # and stored in tiff files in the TEMP/SutyArea/scenario/Q_FILES directory.
465
- # The arrays are also stored in the part_accept dictionary.
466
-
467
- if 2 in steps or steps_acceptability.LOAD_FROM_FILES in steps:
468
- # Load/Reload the acceptability values from files
469
-
470
- if 1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps:
471
- # We have computed/updted the acceptibility values.
472
- # We do not need to reload them.
473
- logging.warning("The acceptability values have been computed in step 1 - avoid reloading")
474
- logging.info("If you want to reload the acceptability values, please remove step 1 from the list of steps")
475
- else:
476
-
477
- # Get the list of Q files
478
- qs = manager.get_q_files()
479
-
480
- # Iterate over the return periods
481
- for curT in return_periods:
482
- logging.info(curT)
483
-
484
- # We set the filename from the return period, not the "qs" list
485
- q_filename = manager.TMP_QFILES / "Q{}.tif".format(curT)
486
-
487
- # Check if the file exists
488
- assert q_filename.exists(), "The file {} does not exist".format(q_filename)
489
- # Check if the file is in the "qs" list
490
- assert q_filename in qs, "The file {} is not in the list of Q files".format(q_filename)
491
-
492
- # Load the Q file for the return period
493
- tmp_data = gdal.Open(str(q_filename))
494
- # Convert to numpy array
495
- part_accept[curT] = tmp_data.GetRasterBand(1).ReadAsArray()
496
-
497
- done.append(steps_acceptability.LOAD_FROM_FILES)
498
-
499
- if 3 in steps or steps_acceptability.COMPUTE_MEAN_ACCEPT in steps:
500
-
501
- assert len(part_accept) == len(return_periods), "The number of acceptability files is not equal to the number of return periods"
502
-
503
- # Pointing the last return period, maybe 1000 but not always
504
- array_tmax = part_accept[return_periods[-1]]
505
-
506
- # Get ponderations for the return periods
507
- if coeff_auto:
508
- logging.info("Automatic ponderation")
509
- pond = manager.get_ponderations()
510
- assert pond["Ponderation"].sum() > 0.999999 and pond["Ponderation"].sum()<1.0000001, "The sum of the ponderations is not equal to 1"
511
-
512
- elif manager.is_valid_ponderation_csv:
513
- logging.info("Manual ponderation")
514
- # Load the ponderation file
515
- pond = pd.read_csv(manager.PONDERATION_CSV)
516
- # Set the index to the interval, so we can use the interval as a key
517
- pond.set_index("Interval", inplace=True)
518
-
519
- else:
520
- logging.error("The ponderation file is missing")
521
- logging.info("Please provide the ponderation file or set 'coeff_auto' to True")
522
- return -1
523
-
524
- assert len(pond) == len(return_periods), "The number of ponderations is not equal to the number of return periods"
525
-
526
- # Initialize the combined acceptability matrix -- Ponderate mean of the local acceptability
527
- comb = np.zeros(part_accept[return_periods[-1]].shape, dtype=np.float32)
528
-
529
- for curT in return_periods:
530
- assert part_accept[curT].dtype == np.float32, "The dtype of the acceptability matrix is not np.float32"
531
- assert part_accept[curT].shape == comb.shape, "The shape of the acceptability matrix is not the right one"
532
-
533
- comb += part_accept[curT] * float(pond["Ponderation"][curT])
534
-
535
- y_pixels, x_pixels = comb.shape # number of pixels in x
536
-
537
- # Set up output GeoTIFF
538
- driver = gdal.GetDriverByName('GTiff')
539
- dataset = driver.Create(str(manager.OUT_ACCEPT),
540
- x_pixels, y_pixels,
541
- 1,
542
- gdal.GDT_Float32,
543
- options=["COMPRESS=LZW"])
544
-
545
- assert comb.dtype == np.float32, "The dtype of the combined acceptability matrix is not np.float32"
546
-
547
- dataset.GetRasterBand(1).WriteArray(comb)
548
- dataset.SetGeoTransform(geotrans)
549
- dataset.SetProjection(proj)
550
- dataset.FlushCache()
551
- dataset=None
552
-
553
- # Resample to XXm
554
- Agg = gdal.Warp(str(manager.OUT_ACCEPT_100M),
555
- str(manager.OUT_ACCEPT),
556
- xRes=resample_size,
557
- yRes=resample_size,
558
- resampleAlg='Average')
559
- Agg.FlushCache()
560
- Agg = None
561
-
562
- done.append(steps_acceptability.COMPUTE_MEAN_ACCEPT)
563
-
1
+ """
2
+ Author: University of Liege, HECE, LEMA
3
+ Date: 2024
4
+
5
+ Copyright (c) 2024 University of Liege. All rights reserved.
6
+
7
+ This script and its content are protected by copyright law. Unauthorized
8
+ copying or distribution of this file, via any medium, is strictly prohibited.
9
+ """
10
+
11
+ from .Parallels import parallel_gpd_clip, parallel_v2r, parallel_datamod
12
+ from .func import data_modification, compute_vulnerability, compute_vulnerability4scenario
13
+ from .func import match_vulnerability2sim, compute_acceptability, shp_to_raster, clip_layer
14
+ from .func import Accept_Manager, cleaning_directory, EXTENT, Vulnerability_csv, compute_code
15
+
16
+ import pandas as pd
17
+ import os
18
+ from osgeo import gdal
19
+ import fiona
20
+ import glob
21
+ import numpy as np
22
+ import geopandas as gpd
23
+ from pathlib import Path
24
+ import logging
25
+ from tqdm import tqdm
26
+ from enum import Enum
27
+ from pyogrio import read_dataframe
28
+
29
+ class steps_base_data_creation(Enum):
30
+ """
31
+ Enum for the steps in the base data creation
32
+ """
33
+ CLIP_GDB = 1
34
+ CLIP_CADASTER = 2
35
+ CLIP_PICC = 3
36
+ POINTS2POLYS = 4
37
+ RASTERIZE_IGN = 5
38
+ PREPROCESS_VULNCODE = 6
39
+ DATABASE_TO_RASTER = 7
40
+
41
+ @classmethod
42
+ def get_list_names(cls):
43
+ return [f'{cur.name} - {cur.value}' for cur in cls]
44
+
45
+ class steps_vulnerability(Enum):
46
+ """
47
+ Enum for the steps in the vulnerability computation
48
+ """
49
+ CREATE_RASTERS = 1
50
+ CREATE_RASTERS_VULN = 10
51
+ CREATE_RASTERS_CODE = 11
52
+ APPLY_MODIFS = 2
53
+ MATCH_SIMUL = 3
54
+
55
+ @classmethod
56
+ def get_list_names(cls):
57
+ return [f'{cur.name} - {cur.value}' for cur in cls]
58
+
59
+ class steps_acceptability(Enum):
60
+ """
61
+ Enum for the steps in the acceptability computation
62
+ """
63
+ COMPUTE_LOCAL_ACCEPT = 1
64
+ LOAD_FROM_FILES = 2
65
+ COMPUTE_MEAN_ACCEPT = 3
66
+ COMPUTE_WITHOUT_SCENARIOS = 4
67
+ COMPUTE_WITH_SCENARIOS = 5
68
+
69
+ @classmethod
70
+ def get_list_names(cls):
71
+ return [f'{cur.name} - {cur.value}' for cur in cls]
72
+
73
+ def Base_data_creation(main_dir:str = 'Data',
74
+ Original_gdb:str = 'GT_Resilence_dataRisques202010.gdb',
75
+ Study_area:str = 'Bassin_Vesdre.shp',
76
+ CaPa_Walloon:str = 'Cadastre_Walloon.gpkg',
77
+ PICC_Walloon:str = 'PICC_vDIFF.gdb',
78
+ CE_IGN_top10v:str = 'CE_IGN_TOP10V/CE_IGN_TOP10V.shp',
79
+ resolution:float = 1.,
80
+ number_procs:int = 8,
81
+ steps:list[int] | list[steps_base_data_creation] = [1,2,3,4,5,6,7],
82
+ Vuln_csv:str = 'Vulnerability.csv'):
83
+ """
84
+ Create the databse.
85
+
86
+ In this step, the following operations are performed:
87
+ - Clip the original gdb file to the study area
88
+ - Clip the Cadastre Walloon file to the study area
89
+ - Clip the PICC Walloon file to the study area
90
+ - Clip and Rasterize the IGN top10v file
91
+ - Create the study area database with the vulnerability levels
92
+
93
+
94
+ :param main_dir: The main data directory
95
+ :param Original_gdb: The original gdb file from SPW - GT Resilience
96
+ :param Study_area: The study area shapefile -- Data will be clipped to this area
97
+ :param CaPa_Walloon: The Cadastre Walloon file -- Shapfeile from SPW
98
+ :param PICC_Walloon: The PICC Walloon file -- Shapefile from SPW
99
+ :param CE_IGN_top10v: The CE "Cours d'eau" IGN top10v file -- Shapefile from IGN with river layer
100
+ :param resolution: The output resolution of the raster files
101
+ :param number_procs: The number of processors to use for parallel processing
102
+
103
+ """
104
+ LAYER_CABU = "CaBu"
105
+ LAYER_CAPA = "CaPa"
106
+ LAYER_BATIEMPRISE = "CONSTR_BATIEMPRISE"
107
+
108
+ manager = Accept_Manager(main_dir,
109
+ Study_area,
110
+ Original_gdb=Original_gdb,
111
+ CaPa_Walloon=CaPa_Walloon,
112
+ PICC_Walloon=PICC_Walloon,
113
+ CE_IGN_top10v=CE_IGN_top10v,
114
+ Vuln_csv=Vuln_csv)
115
+
116
+ if not manager.check_before_database_creation():
117
+ logging.error("The necessary files are missing - Verify logs for more information")
118
+ return
119
+
120
+ done = []
121
+
122
+ if 1 in steps or 6 in steps or steps_base_data_creation.PREPROCESS_VULNCODE in steps or steps_base_data_creation.CLIP_GDB in steps:
123
+ # Load the vulnerability CSV to get the layers
124
+ vulnerability_csv = Vulnerability_csv(manager.VULNERABILITY_CSV)
125
+
126
+ if 1 in steps or steps_base_data_creation.CLIP_GDB in steps:
127
+ # Clean the directory to avoid any conflict
128
+ # GPKG driver does not overwrite the existing file but adds new layers
129
+ cleaning_directory(manager.TMP_CLIPGDB)
130
+
131
+ # ********************************************************************************************************************
132
+ # Step 1, Clip Original GDB
133
+
134
+ # Clip the GDB file and store it in output directory : manager.TMP_CLIPGDB
135
+ parallel_gpd_clip(vulnerability_csv.get_layers(), manager.ORIGINAL_GDB, manager.SA, manager.TMP_CLIPGDB, number_procs)
136
+
137
+ done.append(steps_base_data_creation.CLIP_GDB)
138
+
139
+ if 2 in steps or steps_base_data_creation.CLIP_CADASTER in steps:
140
+ # ********************************************************************************************************************
141
+ # Step 2, Clip Cadaster data
142
+ cleaning_directory(manager.TMP_CADASTER)
143
+
144
+ # Only 2 layers are present in the Cadastre Walloon file
145
+ # Clip the Cadastre Walloon file and store it in output directory : manager.TMP_CADASTER
146
+ parallel_gpd_clip([LAYER_CABU, LAYER_CAPA], manager.CAPA_WALLOON, manager.SA, manager.TMP_CADASTER, min(2, number_procs))
147
+
148
+ done.append(steps_base_data_creation.CLIP_CADASTER)
149
+
150
+ if 3 in steps or steps_base_data_creation.CLIP_PICC in steps:
151
+ # ********************************************************************************************************************
152
+ # Step 3, Clip PICC data
153
+ cleaning_directory(manager.TMP_PICC)
154
+
155
+ # ONly 1 layer is needed from the PICC Walloon file
156
+ # Clip the PICC Walloon file and store it in output dir : manager.TMP_PICC
157
+ parallel_gpd_clip([LAYER_BATIEMPRISE], manager.PICC_WALLOON, manager.SA, manager.TMP_PICC, min(1, number_procs))
158
+
159
+ done.append(steps_base_data_creation.CLIP_PICC)
160
+
161
+ if 4 in steps or steps_base_data_creation.POINTS2POLYS in steps:
162
+ # ********************************************************************************************************************
163
+ # Step 4, create database based on changes in report
164
+
165
+ cleaning_directory(manager.TMP_WMODIF)
166
+
167
+ # PreLoad Picc and CaPa from clipped files
168
+ Picc:gpd.GeoDataFrame = read_dataframe(str(manager.TMP_PICC / (LAYER_BATIEMPRISE+EXTENT)), layer=LAYER_BATIEMPRISE)
169
+ CaPa:gpd.GeoDataFrame = read_dataframe(str(manager.TMP_CADASTER / (LAYER_CAPA+EXTENT)), layer=LAYER_CAPA)
170
+
171
+ assert Picc.crs == CaPa.crs, "The crs of the two shapefiles are different"
172
+
173
+ parallel_datamod(manager=manager, picc=Picc, capa=CaPa, number_procs=number_procs)
174
+
175
+ done.append(steps_base_data_creation.POINTS2POLYS)
176
+
177
+ if 5 in steps or steps_base_data_creation.RASTERIZE_IGN in steps:
178
+ # ********************************************************************************************************************
179
+ # Step 5 : Rasaterize the IGN data "Course d'eau" to get the riverbed mask
180
+ LAYER_IGN = "CE_IGN_TOP10V"
181
+ clip_layer(layer=LAYER_IGN, file_path=manager.CE_IGN_TOP10V, Study_Area=manager.SA, output_dir=manager.TMP_IGNCE)
182
+ shp_to_raster(manager.TMP_IGNCE / (LAYER_IGN + '.gpkg'), manager.SA_MASKED_RIVER, resolution, manager=manager)
183
+
184
+ done.append(steps_base_data_creation.RASTERIZE_IGN)
185
+
186
+ if 6 in steps or steps_base_data_creation.PREPROCESS_VULNCODE in steps:
187
+ # ********************************************************************************************************************
188
+ # Step 6 : Pre-processing for Vulnerability
189
+ # Save the database with vulnerability levels and codes
190
+ # This database will be rasterized in 'Database_to_raster'
191
+
192
+ layers_sa = manager.get_layers_in_wmodif()
193
+ layers_csv = vulnerability_csv.get_layers()
194
+
195
+ # Search difference between the two lists of layers
196
+ list_shp = list(set(layers_csv).difference(layers_sa))
197
+
198
+ logging.info("Excluded layers due to no features in shapefiles:")
199
+ logging.info(list_shp)
200
+
201
+ not_in_csv = [curlayer for curlayer in layers_sa if curlayer not in layers_csv]
202
+ if len(not_in_csv) > 0:
203
+ logging.error("Not treated layers due to no vulnerability level or code:")
204
+ logging.error(not_in_csv)
205
+
206
+ logging.info("STEP1: Saving the database for Vulnerability with attributes Vulne and Code")
207
+
208
+ for curlayer in layers_sa:
209
+ logging.info(curlayer)
210
+
211
+ in_file = str(manager.TMP_WMODIF / (curlayer+EXTENT))
212
+ out_file = str(manager.TMP_CODEVULNE / (curlayer+EXTENT))
213
+
214
+ shp:gpd.GeoDataFrame = gpd.read_file(in_file)
215
+
216
+ nb_lines, _ = shp.shape
217
+ if nb_lines > 0:
218
+ shp["Path"] = curlayer
219
+ shp["Vulne"] = vulnerability_csv.get_vulnerability_level(curlayer)
220
+ shp["Code"] = vulnerability_csv.get_vulnerability_code(curlayer)
221
+ shp = shp[["geometry", "Path", "Vulne","Code"]]
222
+ shp.to_file(out_file)
223
+ else:
224
+ # Normally, Phase 1 should have removed the empty shapefiles
225
+ # But, we never know... ;-)
226
+ logging.warning(f"Empty shapefile {curlayer} in {in_file}")
227
+
228
+ done.append(steps_base_data_creation.PREPROCESS_VULNCODE)
229
+
230
+ if 7 in steps or steps_base_data_creation.DATABASE_TO_RASTER in steps:
231
+ # Rasterize the database
232
+ cleaning_directory(manager.TMP_RASTERS)
233
+ cleaning_directory(manager.TMP_RASTERS_CODE)
234
+ cleaning_directory(manager.TMP_RASTERS_VULNE)
235
+
236
+ Database_to_raster(main_dir,
237
+ Study_area,
238
+ resolution,
239
+ number_procs=number_procs,
240
+ Vuln_csv=Vuln_csv)
241
+
242
+ done.append(steps_base_data_creation.DATABASE_TO_RASTER)
243
+
244
+ return done
245
+
246
def Database_to_raster(main_dir:str = 'Data',
                       Study_area:str = 'Bassin_Vesdre.shp',
                       resolution:float = 1.,
                       number_procs:int = 16,
                       Vuln_csv:str = 'Vulnerability.csv'):
    """
    Convert the vector database to raster database based on their vulnerability values

    Each layer is converted to a raster file with the vulnerability values
    and the code values.

    They are stored in the TEMP/DATABASES/*StudyArea*/VULNERABILITY/RASTERS in:
     - Code
     - Vulne

    :param main_dir: The main data directory
    :param Study_area: The study area shapefile
    :param resolution: The resolution of the output raster files - default is 1 meter
    :param number_procs: The number of processors to use for parallel processing
    :param Vuln_csv: CSV file providing the vulnerability level/code of each layer

    The parallel processing is safe as each layer is processed independently.
    """

    manager = Accept_Manager(main_dir, Study_area, Vuln_csv=Vuln_csv)

    # Be tolerant if the caller passed the resolution as an int or a string
    resolution = float(resolution)

    if not manager.check_before_rasterize():
        logging.error("The necessary files are missing - Verify logs for more information")
        return

    logging.info("Convert vectors to raster based on their vulnerability values")

    # One raster set per attribute; each call rasterizes every layer in parallel
    attributes = ["Vulne", "Code"]
    for cur_attrib in attributes:
        parallel_v2r(manager, cur_attrib, resolution, number_procs, convert_to_sparse=True)
def Vulnerability(main_dir:str = 'Data',
                  scenario:str = 'Scenario1',
                  Study_area:str = 'Bassin_Vesdre.shp',
                  resolution:float = 1.,
                  steps:list[int] | list[steps_vulnerability] = [1,10,11,2,3],
                  Vuln_csv:str = 'Vulnerability.csv',
                  Intermediate_csv:str = 'Intermediate.csv'):
    """
    Compute the vulnerability for the study area and the scenario, if needed.

    The vulnerability is computed in 3 steps:
     1. Compute the vulnerability for the study area
     2. Compute the vulnerability for the scenario
     3. Clip the vulnerability rasters to the simulation area

    During step 3, three matrices are computed and clipped to the simulation area:
     - Vulnerability
     - Code
     - Masked River

    :param main_dir: The main data directory
    :param scenario: The scenario name
    :param Study_area: The study area shapefile
    :param resolution: The resolution of the output raster files - default is 1 meter
    :param steps: The steps to compute the vulnerability - default is [1,10,11,2,3]
    :param Vuln_csv: CSV file providing the vulnerability level/code of each layer
    :param Intermediate_csv: CSV file providing the intermediate acceptability values

    To be more rapid, the steps can be computed separately.
     - [1,2,3] : All steps are computed - Necessary for the first time
     - [2,3]   : Only the scenario and clipping steps are computed -- Useful for scenario changes
     - [3]     : Only the clipping step is computed -- Useful if simulation area changes but scenario is the same

    """

    # Call of the Manager Class --> allows structure
    manager = Accept_Manager(main_dir,
                             Study_area,
                             scenario=scenario,
                             Vuln_csv=Vuln_csv,
                             Intermediate_csv=Intermediate_csv)

    if not manager.check_before_vulnerability():
        logging.error("The necessary files are missing - Verify logs for more information")
        return

    logging.info("Starting VULNERABILITY computations at {} m resolution".format(resolution))

    done = []

    if 1 in steps or steps_vulnerability.CREATE_RASTERS in steps:
        # Step 1 : Compute the vulnerability rasters for the study area
        # The data **will not** be impacted by the scenario modifications

        logging.info("Generate Vulnerability rasters {}m".format(resolution))

        cleaning_directory(manager.TMP_SCEN_DIR)

        # Sub-steps 10/11 allow recomputing only one of the two rasters
        if 10 in steps or steps_vulnerability.CREATE_RASTERS_VULN in steps:
            compute_vulnerability(manager) #global
            done.append(steps_vulnerability.CREATE_RASTERS_VULN)

        if 11 in steps or steps_vulnerability.CREATE_RASTERS_CODE in steps:
            compute_code(manager) #global
            done.append(steps_vulnerability.CREATE_RASTERS_CODE)

        done.append(steps_vulnerability.CREATE_RASTERS)

    if 2 in steps or steps_vulnerability.APPLY_MODIFS in steps:
        # Step 2 : Compute the vulnerability rasters for the scenario
        # The data **will be** impacted by the scenario modifications

        if not manager.check_vuln_code_sa():
            logging.error("The vulnerability and code files for the study area are missing")
            logging.warning("Force the computation even if not prescribed in the steps")

            # BUGFIX: propagate the CSV configuration to the forced recomputation.
            # Previously only the positional arguments were forwarded, so a custom
            # Vuln_csv/Intermediate_csv was silently ignored here.
            Vulnerability(main_dir, scenario, Study_area, resolution, [1],
                          Vuln_csv=Vuln_csv, Intermediate_csv=Intermediate_csv)

        bu:list[Path] = manager.get_files_in_rm_buildings()

        if len(bu)>0:
            # Rasterize every "removed building" shapefile, then apply them
            for curfile in bu:
                tiff_file = manager.TMP_RM_BUILD_DIR / (curfile.stem + ".tiff")
                shp_to_raster(curfile, tiff_file)

            compute_vulnerability4scenario(manager)
        else:
            logging.warning(f"No buildings were removed in water depth analysis OR No shapefiles in {manager.IN_RM_BUILD_DIR}")

        done.append(steps_vulnerability.APPLY_MODIFS)

    if 3 in steps or steps_vulnerability.MATCH_SIMUL in steps:
        # Step 3 : Clip the vulnerability/code rasters to the **simulation area**
        logging.info("Save Vulnerability files for the area of interest")

        return_periods = manager.get_return_periods()
        # The largest return period defines the simulation extent to match
        TMAX = manager.get_filepath_for_return_period(return_periods[-1])

        if TMAX is None:
            logging.error("The file for the maximum return period is missing")
            return

        match_vulnerability2sim(manager.SA_MASKED_RIVER, manager.OUT_MASKED_RIVER, TMAX)
        match_vulnerability2sim(manager.SA_VULN, manager.OUT_VULN, TMAX)
        match_vulnerability2sim(manager.SA_CODE, manager.OUT_CODE, TMAX)

        # Scenarios including change in vulnerability
        # (the redundant `existence=False` pre-assignment was removed)
        existence = manager.create_vrtIfExists()

        if existence:
            logging.info("Scenarios have been applied to the vulnerability matrix see _scenarios")
            manager.translate_vrt2tif()

        done.append(steps_vulnerability.MATCH_SIMUL)

    return done
def Acceptability(main_dir:str = 'Vesdre',
                  scenario:str = 'Scenario1',
                  Study_area:str = 'Bassin_Vesdre.shp',
                  coeff_auto:bool = True,
                  Ponderation_csv:str = 'Ponderation.csv',
                  resample_size:int = 100,
                  steps:list[int] | list[steps_acceptability] = [1,2,3,4,5]):
    """ Compute acceptability for the scenario

    :param main_dir: The main data directory
    :param scenario: The scenario name
    :param Study_area: The study area shapefile
    :param coeff_auto: If True, the ponderation coefficients are computed automatically,
                       otherwise they are read from Ponderation_csv
    :param Ponderation_csv: CSV file with the manual ponderation per return-period interval
    :param resample_size: Cell size [m] of the resampled/aggregated output raster
    :param steps: The steps to compute - default is [1,2,3,4,5]

    :return: list of the steps actually done, or -1 if the ponderation file is missing
    """

    done = []

    manager = Accept_Manager(main_dir,
                             Study_area,
                             scenario=scenario,
                             Ponderation_csv=Ponderation_csv)

    # Load the vulnerability raster **for the scenario**, and check if an assembly exists and is asked by the user
    # Initialization of lists to read/write according to the needed steps
    VulneToCompute, PathsToSaveA, PathsToSaveA100 = [], [], []
    if 4 in steps or steps_acceptability.COMPUTE_WITHOUT_SCENARIOS in steps:
        VulneToCompute.append(manager.OUT_VULN)
        PathsToSaveA.append(manager.OUT_ACCEPT)
        PathsToSaveA100.append(manager.OUT_ACCEPT_100M)

    if 5 in steps or steps_acceptability.COMPUTE_WITH_SCENARIOS in steps:
        # Only process the "with scenarios" variant if change-rasters actually exist
        change_vuln_files = [Path(a) for a in glob.glob(str(manager.IN_CH_VULN / "*.tiff"))]
        if len(change_vuln_files) != 0:
            VulneToCompute.append(manager.OUT_VULN_Stif)
            PathsToSaveA.append(manager.OUT_ACCEPT_Stif)
            PathsToSaveA100.append(manager.OUT_ACCEPT_100M_Stif)
        else :
            logging.info("No vulnerability rasters in CHANGE_VULNE. The code goes on without them.")

    # Iterate over the (vulnerability raster, output path, resampled output path) triplets
    for cur_vuln, saveA, saveA100 in zip(VulneToCompute, PathsToSaveA, PathsToSaveA100):
        vulne = gdal.Open(str(cur_vuln))
        # Load the river mask
        riv = gdal.Open(str(manager.OUT_MASKED_RIVER))

        # Get the geotransform and projection for the output tiff
        geotrans = riv.GetGeoTransform()
        proj = riv.GetProjection()

        assert vulne.GetGeoTransform() == riv.GetGeoTransform(), "The geotransform of the two rasters is different"
        assert vulne.GetProjection() == riv.GetProjection(), "The projection of the two rasters is different"

        # Convert to numpy array
        vulne = vulne.GetRasterBand(1).ReadAsArray()
        riv = riv.GetRasterBand(1).ReadAsArray()

        # Get the return periods available
        return_periods = manager.get_return_periods()

        # Prepare the river bed filter
        # Useful as we iterate over the return periods
        # and the river bed is the same for all return periods
        ij_riv = np.argwhere(riv == 1)

        # Initialize the dictionary to store the acceptability values
        part_accept = {}

        if 1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps:
            # Compute acceptability for each return period
            for curT in tqdm(return_periods):

                # Load the **FILLED** modelled water depth for the return period
                model_h = gdal.Open(str(manager.get_sim_file_for_return_period(curT)))
                # Convert to numpy array
                model_h = model_h.GetRasterBand(1).ReadAsArray()

                assert model_h.shape == vulne.shape, "The shape of the modelled water depth is different from the vulnerability raster"

                # Set 0. in the river bed
                # (the original `model_h[model_h == 0] = 0` no-op was removed)
                model_h[ij_riv[:,0], ij_riv[:,1]] = 0

                # NOTE(review): these asserts crash if the river mask is empty
                # or the raster is fully dry -- presumably guaranteed upstream
                assert model_h[ij_riv[0][0], ij_riv[0][1]] == 0, "The river bed is not set to 0 in the modelled water depth"
                assert model_h.max() > 0, "The maximum water depth is 0"
                if model_h.min() < 0:
                    logging.warning("The minimum water depth is negative - {} cells".format(np.count_nonzero(model_h<0)))
                    logging.warning("Setting the negative values to 0")
                    model_h[model_h < 0] = 0

                logging.info("Return period {}".format(curT))

                # Compute the local acceptability for the return period
                part_accept[curT] = compute_acceptability(manager, model_h, vulne, curT, (geotrans, proj))

            done.append(steps_acceptability.COMPUTE_LOCAL_ACCEPT)

        # At this point, the local acceptability for each return period is computed
        # and stored in tiff files in the TEMP/StudyArea/scenario/Q_FILES directory.
        # The arrays are also stored in the part_accept dictionary.

        if 2 in steps or steps_acceptability.LOAD_FROM_FILES in steps:
            # Load/Reload the acceptability values from files

            if 1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps:
                # We have computed/updated the acceptability values.
                # We do not need to reload them.
                logging.warning("The acceptability values have been computed in step 1 - avoid reloading")
                logging.info("If you want to reload the acceptability values, please remove step 1 from the list of steps")
            else:

                # Get the list of Q files
                qs = manager.get_q_files()

                # Iterate over the return periods
                for curT in return_periods:
                    logging.info(curT)

                    # We set the filename from the return period, not the "qs" list
                    q_filename = manager.TMP_QFILES / "Q{}.tif".format(curT)

                    # Check if the file exists
                    assert q_filename.exists(), "The file {} does not exist".format(q_filename)
                    # Check if the file is in the "qs" list
                    assert q_filename in qs, "The file {} is not in the list of Q files".format(q_filename)

                    # Load the Q file for the return period
                    tmp_data = gdal.Open(str(q_filename))
                    # Convert to numpy array
                    part_accept[curT] = tmp_data.GetRasterBand(1).ReadAsArray()

            done.append(steps_acceptability.LOAD_FROM_FILES)

        if 3 in steps or steps_acceptability.COMPUTE_MEAN_ACCEPT in steps:

            assert len(part_accept) == len(return_periods), "The number of acceptability files is not equal to the number of return periods"

            # Get ponderations for the return periods
            if coeff_auto:
                logging.info("Automatic ponderation")
                pond = manager.get_ponderations()
                # Hoisted the repeated sum() into a local
                pond_sum = pond["Ponderation"].sum()
                assert pond_sum > 0.999999 and pond_sum < 1.0000001, "The sum of the ponderations is not equal to 1"

            elif manager.is_valid_ponderation_csv:
                logging.info("Manual ponderation")
                # Load the ponderation file
                pond = pd.read_csv(manager.PONDERATION_CSV)
                # Set the index to the interval, so we can use the interval as a key
                pond.set_index("Interval", inplace=True)

            else:
                logging.error("The ponderation file is missing")
                logging.info("Please provide the ponderation file or set 'coeff_auto' to True")
                return -1

            assert len(pond) == len(return_periods), "The number of ponderations is not equal to the number of return periods"

            # Initialize the combined acceptability matrix -- Ponderate mean of the local acceptability
            comb = np.zeros(part_accept[return_periods[-1]].shape, dtype=np.float32)

            for curT in return_periods:
                assert part_accept[curT].dtype == np.float32, "The dtype of the acceptability matrix is not np.float32"
                assert part_accept[curT].shape == comb.shape, "The shape of the acceptability matrix is not the right one"

                comb += part_accept[curT] * float(pond["Ponderation"][curT])

            y_pixels, x_pixels = comb.shape  # raster dimensions

            # Set up output GeoTIFF
            driver = gdal.GetDriverByName('GTiff')

            dataset = driver.Create(str(saveA),
                                    x_pixels, y_pixels,
                                    1,
                                    gdal.GDT_Float32,
                                    options=["COMPRESS=LZW"])

            assert comb.dtype == np.float32, "The dtype of the combined acceptability matrix is not np.float32"

            dataset.GetRasterBand(1).WriteArray(comb)
            dataset.SetGeoTransform(geotrans)
            dataset.SetProjection(proj)
            dataset.FlushCache()
            dataset=None

            # Resample/aggregate to `resample_size` meters (average of the finer cells)
            Agg = gdal.Warp(str(saveA100),
                            str(saveA),
                            xRes=resample_size,
                            yRes=resample_size,
                            resampleAlg='Average')
            Agg.FlushCache()
            Agg = None

            done.append(steps_acceptability.COMPUTE_MEAN_ACCEPT)

    return done