wolfhece 2.1.23__py3-none-any.whl → 2.1.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wolfhece/acceptability/Parallels.py +81 -0
- wolfhece/acceptability/__init__.py +3 -0
- wolfhece/acceptability/acceptability.py +420 -0
- wolfhece/acceptability/acceptability1.py +211 -0
- wolfhece/acceptability/acceptability_gui.py +318 -0
- wolfhece/acceptability/cli.py +150 -0
- wolfhece/acceptability/func.py +1058 -0
- wolfhece/apps/version.py +1 -1
- wolfhece/cli.py +5 -0
- wolfhece/wolf_array.py +16 -4
- {wolfhece-2.1.23.dist-info → wolfhece-2.1.25.dist-info}/METADATA +1 -1
- {wolfhece-2.1.23.dist-info → wolfhece-2.1.25.dist-info}/RECORD +15 -8
- {wolfhece-2.1.23.dist-info → wolfhece-2.1.25.dist-info}/entry_points.txt +4 -1
- {wolfhece-2.1.23.dist-info → wolfhece-2.1.25.dist-info}/WHEEL +0 -0
- {wolfhece-2.1.23.dist-info → wolfhece-2.1.25.dist-info}/top_level.txt +0 -0
wolfhece/acceptability/Parallels.py
@@ -0,0 +1,81 @@
+from .func import gpd_clip, data_modification, vector_to_raster, Comp_Vulnerability, match_vuln_modrec, VulMod, shp_to_raster
+import multiprocessing
+from functools import partial
+import os
+from pathlib import Path
+
+def parallel_gpd_clip(layer:list[str],
+                      file_path:str,
+                      Study_Area:str,
+                      output_gpkg:str,
+                      number_procs:int = 1):
+    """
+    Clip the layers to the study area.
+
+    Process the layers in parallel.
+
+    FIXME: Is the GPKG driver fully parallel compliant?
+
+    :param layer: List of layers to clip
+    :param file_path: The path to the file
+    :param Study_Area: The study area
+    :param output_gpkg: The output GeoPackage
+    :param number_procs: The number of processors to use
+
+    """
+    file_path = str(file_path)
+    Study_Area = str(Study_Area)
+    output_gpkg = str(output_gpkg)
+
+    if number_procs == 1:
+
+        for curlayer in layer:
+            gpd_clip(curlayer, file_path, Study_Area, output_gpkg)
+
+    else:
+        pool = multiprocessing.Pool(processes=number_procs)
+        prod_x = partial(gpd_clip,
+                         file_path=file_path,
+                         Study_Area=Study_Area,
+                         geopackage=output_gpkg)
+        result_list = pool.map(prod_x, layer)
+        print(result_list)
+
+def parallel_v2r(layers:list[str],
+                 study_area_database:Path,
+                 extent:Path,
+                 attribute:str,
+                 pixel:float,
+                 number_procs:int = 1):
+    """
+    Convert the vector layers to raster.
+
+    Process the layers in parallel.
+
+    :remark: It is permitted to execute this function in multiprocessing because we write separate files.
+
+    :param layers: List of layers to convert to raster.
+    :param study_area_database: The Path to the study area database
+    :param extent: The extent of the study area
+    :param attribute: The attribute to convert to raster
+    :param pixel: The pixel size of the raster
+    :param number_procs: The number of processors to use
+
+    """
+
+    attribute = str(attribute)
+
+    if number_procs == 1:
+        for curlayer in layers:
+            vector_to_raster(curlayer, study_area_database, extent, attribute, pixel)
+    else:
+        pool = multiprocessing.Pool(processes=number_procs)
+        prod_x = partial(vector_to_raster,
+                         vector_input=study_area_database,
+                         extent=extent,
+                         attribute=attribute,
+                         pixel_size=pixel)  # prod_x takes only the layer as argument; the other parameters are fixed
+
+        result_list = pool.map(prod_x, layers)
+        print(result_list)
+
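For context, a minimal driver for these two helpers could look as follows. File and layer names are placeholders, and the `__main__` guard is required because `multiprocessing` workers re-import the calling module on Windows/macOS:

```python
# Hypothetical driver -- file and layer names are placeholders, not shipped data.
from pathlib import Path
from wolfhece.acceptability.Parallels import parallel_gpd_clip, parallel_v2r

if __name__ == "__main__":
    layers = ["Layer_A", "Layer_B"]   # layer names present in the source geodatabase

    # Clip both layers to the study area with two worker processes
    parallel_gpd_clip(layers,
                      file_path="input_database.gdb",
                      Study_Area="study_area.shp",
                      output_gpkg="clipped.gpkg",
                      number_procs=2)

    # Rasterize the "Vulne" attribute of each clipped layer at 1 m,
    # mirroring the call made in acceptability.py below
    parallel_v2r(layers,
                 study_area_database=Path("clipped.gpkg"),
                 extent=Path("study_area.shp"),
                 attribute="Vulne",
                 pixel=1.0,
                 number_procs=2)
```

Note that neither helper closes its pool; wrapping the `Pool` in a `with` block (or calling `pool.close()` then `pool.join()`) would release the worker processes promptly.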
wolfhece/acceptability/acceptability.py
@@ -0,0 +1,420 @@
+from .Parallels import parallel_gpd_clip, parallel_v2r
+from .func import data_modification, Comp_Vulnerability, Comp_Vulnerability_Scen, match_vuln_modrec, VulMod, shp_to_raster, Accept_Manager, cleaning_directory
+
+import pandas as pd
+import os
+from osgeo import gdal
+import fiona
+import glob
+import numpy as np
+import geopandas as gpd
+from pathlib import Path
+import logging
+from tqdm import tqdm
+
+
+
+
+class Vulnerability_csv():
+
+    def __init__(self, file:Path) -> None:
+        self.file = file
+        self.data = pd.read_csv(file, sep=",", encoding='latin-1')
+
+    def get_layers(self) -> list:
+        return [a[1] for a in self.data["Path"].str.split('/')]
+
+    def get_vulnerability_level(self, layer:str) -> str:
+        idx = self.get_layers().index(layer)
+        return self.data.iloc[idx]["Vulne"]
+
+    def get_vulnerability_code(self, layer:str) -> str:
+        idx = self.get_layers().index(layer)
+        return self.data.iloc[idx]["Code"]
+
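`Vulnerability_csv` assumes a comma-separated table with at least the columns `Path`, `Vulne` and `Code`, where each `Path` entry is a '/'-separated path whose second segment is the layer name. An illustrative query (the CSV content is invented, not the file shipped with the package):

```python
from pathlib import Path

# Assumed table shape, inferred from the accessors above:
#   Path,Vulne,Code
#   GT_Resilence/HOPITAUX,4,104
#   GT_Resilence/ECOLES,3,92
vuln = Vulnerability_csv(Path("Vulnerability.csv"))

print(vuln.get_layers())                        # ['HOPITAUX', 'ECOLES'] -- second '/' segment of each Path
print(vuln.get_vulnerability_level('ECOLES'))   # 3, from the 'Vulne' column
print(vuln.get_vulnerability_code('ECOLES'))    # 92, from the 'Code' column
```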
+def Base_data_creation(main_dir:str = 'Data',
+                       Original_gdb:str = 'GT_Resilence_dataRisques202010.gdb',
+                       Study_area:str = 'Bassin_Vesdre.shp',
+                       CaPa_Walloon:str = 'Cadastre_Walloon.gpkg',
+                       PICC_Walloon:str = 'PICC_vDIFF.gdb',
+                       CE_IGN_top10v:str = 'CE_IGN_TOP10V/CE_IGN_TOP10V.shp',
+                       resolution:float = 1.,
+                       number_procs:int = 8):
+    """
+    Create the database.
+
+    In this step, the following operations are performed:
+        - Clip the original gdb file to the study area
+        - Clip the Cadastre Walloon file to the study area
+        - Clip the PICC Walloon file to the study area
+        - Clip and rasterize the IGN top10v file
+        - Create the study area database with the vulnerability levels
+
+
+    :param main_dir: The main data directory
+    :param Original_gdb: The original gdb file from SPW - GT Resilience
+    :param Study_area: The study area shapefile -- Data will be clipped to this area
+    :param CaPa_Walloon: The Cadastre Walloon file -- Shapefile from SPW
+    :param PICC_Walloon: The PICC Walloon file -- Shapefile from SPW
+    :param CE_IGN_top10v: The CE "Cours d'eau" IGN top10v file -- Shapefile from IGN with the river layer
+    :param resolution: The output resolution of the raster files
+    :param number_procs: The number of processors to use for parallel processing
+
+    """
+    NUMBER_PROCS = number_procs
+
+    dirsnames = Accept_Manager(main_dir,
+                               Study_area,
+                               Original_gdb=Original_gdb,
+                               CaPa_Walloon=CaPa_Walloon,
+                               PICC_Walloon=PICC_Walloon,
+                               CE_IGN_top10v=CE_IGN_top10v)
+
+    if not dirsnames.check_before_database_creation():
+        logging.error("The necessary files are missing - Verify logs for more information")
+        return
+
+    dirsnames.change_dir()
+
+    # Clean the directory to avoid any conflict
+    # The GPKG driver does not overwrite an existing file but adds new layers
+    cleaning_directory(dirsnames.TMP_STUDYAREA)
+
+    # ********************************************************************************************************************
+    # Step 1, Clip the original GDB
+
+    # Load the vulnerability CSV to get the layers
+    vulnerability_csv = Vulnerability_csv(dirsnames.VULNERABILITY_CSV)
+    # Clip the GDB file and store it in dirsnames.SA_DATABASE
+    parallel_gpd_clip(vulnerability_csv.get_layers(), dirsnames.ORIGINAL_GDB, dirsnames.SA, dirsnames.SA_DATABASE, NUMBER_PROCS)
+
+    # ********************************************************************************************************************
+    # Step 2, Clip the Cadastre data
+
+    # Only 2 layers are present in the Cadastre Walloon file
+    LAYER_CABU = "CaBu"
+    LAYER_CAPA = "CaPa"
+    # Clip the Cadastre Walloon file and store it in dirsnames.SA_CAPA
+    parallel_gpd_clip([LAYER_CABU, LAYER_CAPA], dirsnames.CAPA_WALLOON, dirsnames.SA, dirsnames.SA_CAPA, min(2, NUMBER_PROCS))
+
+    # ********************************************************************************************************************
+    # Step 3, Clip the PICC data
+
+    # Only 1 layer is needed from the PICC Walloon file
+    LAYER_BATIEMPRISE = "CONSTR_BATIEMPRISE"
+    # Clip the PICC Walloon file and store it in dirsnames.SA_PICC
+    parallel_gpd_clip([LAYER_BATIEMPRISE], dirsnames.PICC_WALLOON, dirsnames.SA, dirsnames.SA_PICC, min(1, NUMBER_PROCS))
+
+    # ********************************************************************************************************************
+    # Step 4, Create the database based on the changes in the report
+
+    layers = fiona.listlayers(dirsnames.SA_DATABASE)
+    # Preload PICC and CaPa from the clipped files
+    Picc:gpd.GeoDataFrame = gpd.read_file(dirsnames.SA_PICC, layer = LAYER_BATIEMPRISE)
+    CaPa:gpd.GeoDataFrame = gpd.read_file(dirsnames.SA_CAPA, layer = LAYER_CAPA)
+
+    assert Picc.crs == CaPa.crs, "The CRS of the two shapefiles are different"
+
+    for curlayer in tqdm(layers, desc="Vulnerability : Processing layers"):
+        data_modification(dirsnames.SA_DATABASE, curlayer, dirsnames.SA_FINAL, Picc, CaPa)
+
+    # ********************************************************************************************************************
+    # Step 5 : Rasterize the IGN data "Cours d'eau" to get the riverbed mask
+    shp_to_raster(dirsnames.CE_IGN_TOP10V, dirsnames.SA_MASKED_RIVER, resolution)
+
+    # ********************************************************************************************************************
+    # Step 6 : Pre-processing for Vulnerability
+    # Save the database with vulnerability levels and codes
+    # This database will be rasterized in 'Database_to_raster'
+
+    layers_sa = fiona.listlayers(dirsnames.SA_FINAL)
+    layers_csv = vulnerability_csv.get_layers()
+
+    # Search for the difference between the two lists of layers
+    list_shp = list(set(layers_csv).difference(layers_sa))
+
+    logging.info("Excluded layers due to no features in shapefiles:")
+    logging.info(list_shp)
+
+    logging.info("STEP1: Saving the database for Vulnerability with attributes Vulne and Code")
+
+    for curlayer in layers_sa:
+        logging.info(curlayer)
+
+        shp:gpd.GeoDataFrame = gpd.read_file(dirsnames.SA_FINAL, layer=curlayer)
+
+        x, y = shp.shape
+        if x > 0:
+            shp["Path"] = curlayer
+            shp["Vulne"] = vulnerability_csv.get_vulnerability_level(curlayer)
+            shp["Code"] = vulnerability_csv.get_vulnerability_code(curlayer)
+            shp = shp[["geometry", "Path", "Vulne","Code"]]
+            shp.to_file(dirsnames.SA_FINAL_V, layer=curlayer)
+
+    # Rasterize the database
+    Database_to_raster(main_dir, Study_area, resolution)
+
+    dirsnames.restore_dir()
+
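With the defaults above, a first-time database build reduces to a single call; the arguments shown are simply the documented defaults, and the input files are assumed to sit under `Data`:

```python
# One-shot database build; chains into Database_to_raster at the end (step 6).
Base_data_creation(main_dir='Data',
                   Original_gdb='GT_Resilence_dataRisques202010.gdb',
                   Study_area='Bassin_Vesdre.shp',
                   CaPa_Walloon='Cadastre_Walloon.gpkg',
                   PICC_Walloon='PICC_vDIFF.gdb',
                   CE_IGN_top10v='CE_IGN_TOP10V/CE_IGN_TOP10V.shp',
                   resolution=1.,
                   number_procs=8)
```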
+def Database_to_raster(main_dir:str = 'Data',
+                       Study_area:str = 'Bassin_Vesdre.shp',
+                       resolution:float = 1.,
+                       number_procs:int = 16):
+    """
+    Convert the vector database to a raster database based on the vulnerability values
+
+    Each layer is converted to a raster file with the vulnerability values
+    and the code values.
+
+    They are stored in TEMP/DATABASES/*StudyArea*/VULNERABILITY/RASTERS in:
+        - Code
+        - Vulne
+
+    :param main_dir: The main data directory
+    :param Study_area: The study area shapefile
+    :param resolution: The resolution of the output raster files - default is 1 meter
+    :param number_procs: The number of processors to use for parallel processing
+
+    The parallel processing is safe as each layer is processed independently.
+    """
+
+    dirsnames = Accept_Manager(main_dir, Study_area)
+
+    resolution = float(resolution)
+
+    if not dirsnames.check_before_rasterize():
+        logging.error("The necessary files are missing - Verify logs for more information")
+        return
+
+    dirsnames.change_dir()
+
+    logging.info("Convert vectors to rasters based on their vulnerability values")
+    layers = fiona.listlayers(dirsnames.SA_FINAL_V)
+
+    attributes = ["Vulne", "Code"]
+    for cur_attrib in attributes:
+        parallel_v2r(layers, dirsnames.SA_FINAL_V, dirsnames.SA, cur_attrib, resolution, number_procs)
+
+    dirsnames.restore_dir()
+
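If the vector database already exists, the rasterization can be re-run on its own, for example at a coarser resolution; as the loop above suggests, one raster is written per (layer, attribute) pair:

```python
# Re-rasterize an existing vector database (built by Base_data_creation) at 5 m.
# Outputs land under TEMP/DATABASES/<StudyArea>/VULNERABILITY/RASTERS/ in the
# 'Vulne' and 'Code' subdirectories, per the docstring above.
Database_to_raster(main_dir='Data',
                   Study_area='Bassin_Vesdre.shp',
                   resolution=5.,
                   number_procs=16)
```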
+def Vulnerability(main_dir:str = 'Data',
+                  scenario:str = 'Scenario1',
+                  Study_area:str = 'Bassin_Vesdre.shp',
+                  resolution:float = 1.,
+                  steps:list[int] = [1,2,3]):
+    """
+    Compute the vulnerability for the study area and the scenario, if needed.
+
+    The vulnerability is computed in 3 steps:
+        1. Compute the vulnerability for the study area
+        2. Compute the vulnerability for the scenario
+        3. Clip the vulnerability rasters to the simulation area
+
+    During step 3, three matrices are computed and clipped to the simulation area:
+        - Vulnerability
+        - Code
+        - Masked River
+
+    :param main_dir: The main data directory
+    :param scenario: The scenario name
+    :param Study_area: The study area shapefile
+    :param resolution: The resolution of the output raster files - default is 1 meter
+    :param steps: The steps to compute the vulnerability - default is [1,2,3]
+
+    To be faster, the steps can be computed separately:
+        - [1,2,3] : All steps are computed - necessary the first time
+        - [2,3]   : Only the scenario and clipping steps are computed -- useful for scenario changes
+        - [3]     : Only the clipping step is computed -- useful if the simulation area changes but the scenario is the same
+
+    """
+
+    dirsnames = Accept_Manager(main_dir, Study_area, scenario=scenario)
+
+    if not dirsnames.check_before_vulnerability():
+        logging.error("The necessary files are missing - Verify logs for more information")
+        return
+
+    logging.info("Starting VULNERABILITY computations at {} m resolution".format(resolution))
+
+    dirsnames.change_dir()
+
+    if 1 in steps:
+        # Step 1 : Compute the vulnerability rasters for the study area
+        # The data **will not** be impacted by the scenario modifications
+
+        logging.info("Generate Vulnerability rasters {}m".format(resolution))
+
+        cleaning_directory(dirsnames.TMP_SCEN_DIR)
+
+        Comp_Vulnerability(dirsnames)
+
+    if 2 in steps:
+        # Step 2 : Compute the vulnerability rasters for the scenario
+        # The data **will be** impacted by the scenario modifications
+
+        if not dirsnames.check_vuln_code_sa():
+            logging.error("The vulnerability and code files for the study area are missing")
+            logging.warning("Forcing the computation even if not prescribed in the steps")
+
+            Vulnerability(main_dir, scenario, Study_area, resolution, [1])
+
+        bu:list[Path] = dirsnames.get_files_in_rm_buildings()
+
+        if len(bu)>0:
+            for curfile in bu:
+                tiff_file = dirsnames.TMP_RM_BUILD_DIR / (curfile.stem + ".tiff")
+                shp_to_raster(curfile, tiff_file)
+
+            Comp_Vulnerability_Scen(dirsnames)
+        else:
+            logging.warning(f"No buildings were removed in water depth analysis OR no shapefiles in {dirsnames.IN_RM_BUILD_DIR}")
+
+    if 3 in steps:
+        # Step 3 : Clip the vulnerability/code rasters to the **simulation area**
+
+        logging.info("Save Vulnerability files for the area of interest")
+
+        return_periods = dirsnames.get_return_periods()
+        TMAX = dirsnames.get_filepath_for_return_period(return_periods[-1])
+
+        if TMAX is None:
+            logging.error("The file for the maximum return period is missing")
+            return
+
+        match_vuln_modrec(dirsnames.SA_MASKED_RIVER, dirsnames.OUT_MASKED_RIVER, TMAX)
+        match_vuln_modrec(dirsnames.SA_VULN, dirsnames.OUT_VULN, TMAX)
+        match_vuln_modrec(dirsnames.SA_CODE, dirsnames.OUT_CODE, TMAX)
+
+    dirsnames.restore_dir()
+
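The step combinations from the docstring translate into calls like these (scenario names are illustrative):

```python
Vulnerability('Data', 'Scenario1', 'Bassin_Vesdre.shp', 1., steps=[1, 2, 3])  # first run: everything
Vulnerability('Data', 'Scenario2', 'Bassin_Vesdre.shp', 1., steps=[2, 3])     # new scenario, reuse step 1 rasters
Vulnerability('Data', 'Scenario2', 'Bassin_Vesdre.shp', 1., steps=[3])        # same scenario, re-clip only
```

If the step 1 outputs are missing, step 2 falls back to recomputing them through the recursive call shown above.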
+def Acceptability(main_dir:str = 'Vesdre',
+                  scenario:str = 'Scenario1',
+                  Study_area:str = 'Bassin_Vesdre.shp'):
+    """ Compute acceptability for the scenario """
+
+    dirsnames = Accept_Manager(main_dir, Study_area, scenario=scenario)
+
+    dirsnames.change_dir()
+
+    # Load the vulnerability raster **for the scenario**
+    vulne = gdal.Open(str(dirsnames.OUT_VULN))
+    # Convert to numpy array
+    vulne = vulne.GetRasterBand(1).ReadAsArray()
+
+    # Load the river mask
+    riv = gdal.Open(str(dirsnames.OUT_MASKED_RIVER))
+
+    # Get the geotransform and projection for the output tiff
+    geotrans = riv.GetGeoTransform()
+    proj = riv.GetProjection()
+
+    # Convert to numpy array
+    riv = riv.GetRasterBand(1).ReadAsArray()
+
+    # Get the available return periods
+    return_periods = dirsnames.get_return_periods()
+
+    # Prepare the river bed filter
+    # Useful as we iterate over the return periods
+    # and the river bed is the same for all return periods
+    ij_riv = np.where(riv == 1)
+
+    # Compute acceptability for each return period
+    for curT in tqdm(return_periods):
+
+        # Load the **FILLED** modelled water depth for the return period
+        model_h = gdal.Open(str(dirsnames.get_sim_file_for_return_period(curT)))
+        # Convert to numpy array
+        model_h = model_h.GetRasterBand(1).ReadAsArray()
+
+        # Set nan if the water depth is 0
+        model_h[model_h == 0] = np.nan
+        # Set nan in the river bed
+        model_h[ij_riv] = np.nan
+
+        logging.info("Return period {}".format(curT))
+        # Compute the local acceptability for the return period
+        VulMod(dirsnames, model_h, vulne, curT, (geotrans, proj))
+
+    # At this point, the local acceptability for each return period is computed
+    # and stored in tiff files in the TEMP/StudyArea/scenario/Q_FILES directory
+
+    # Get the list of Q files
+    qs = dirsnames.get_q_files()
+    # Initialize the dictionary to store the acceptability values
+    q_dict = {}
+
+    # Iterate over the return periods
+    for curT in return_periods:
+        logging.info("vm"+str(curT))
+
+        # We set the filename from the return period, not the "qs" list
+        q_filename = dirsnames.TMP_QFILES / "Q{}.tif".format(curT)
+
+        # Check that the file exists
+        assert q_filename.exists(), "The file {} does not exist".format(q_filename)
+        # Check that the file is in the "qs" list
+        assert q_filename in qs, "The file {} is not in the list of Q files".format(q_filename)
+
+        # Load the Q file for the return period
+        tmp_data = gdal.Open(str(q_filename))
+        # Convert to numpy array
+        q_dict["vm"+str(curT)] = tmp_data.GetRasterBand(1).ReadAsArray()
+
+        # Force the deletion of the variable, rather than waiting for the garbage collector
+        # May be useful if the files are large
+        del tmp_data
+
+    # Point to the last return period, maybe 1000 but not always
+    array_t1000 = q_dict["vm{}".format(return_periods[-1])]
+    # Get the indices where the value is -99999
+    # We will force the same mask for all lower return periods
+    ij_t1000 = np.where(array_t1000 == -99999)
+
+    # Iterate over the return periods
+    for curT in return_periods:
+
+        if curT != return_periods[-1]:
+            logging.info(curT)
+
+            # Alias
+            tmp_array = q_dict["vm{}".format(curT)]
+
+            # Set the -99999 values to 0
+            tmp_array[tmp_array == -99999] = 0.
+            # Set the masked values, for the last return period, to nan
+            tmp_array[ij_t1000] = np.nan
+
+    # # Load the ponderation file
+    # pond = pd.read_csv(dirsnames.PONDERATION_CSV)
+    # # Set the index to the interval, so we can use the interval as a key
+    # pond.set_index("Interval", inplace=True)
+
+    # Get the ponderations for the return periods
+    pond = dirsnames.get_ponderations()
+
+    assert len(pond) == len(return_periods), "The number of ponderations is not equal to the number of return periods"
+    assert pond["Ponderation"].sum() > 0.999999 and pond["Ponderation"].sum() < 1.0000001, "The sum of the ponderations is not equal to 1"
+
+    # Initialize the combined acceptability matrix -- weighted mean of the local acceptability
+    comb = np.zeros(q_dict["vm{}".format(return_periods[-1])].shape)
+
+    for curT in return_periods:
+        comb += q_dict["vm{}".format(curT)] * pond["Ponderation"][curT]
+
+    y_pixels, x_pixels = comb.shape  # number of rows (y) and columns (x)
+
+    # Set up the output GeoTIFF
+    driver = gdal.GetDriverByName('GTiff')
+    dataset = driver.Create(str(dirsnames.OUT_ACCEPT), x_pixels, y_pixels, 1, gdal.GDT_Float32, options=["COMPRESS=LZW"])
+    dataset.GetRasterBand(1).WriteArray(comb.astype(np.float32))
+    dataset.SetGeoTransform(geotrans)
+    dataset.SetProjection(proj)
+    dataset.FlushCache()
+    del(dataset)
+
+    # Resample to 100 m
+    Agg = gdal.Warp(str(dirsnames.OUT_ACCEPT_100M), str(dirsnames.OUT_ACCEPT), xRes=100, yRes=100, resampleAlg='Average')
+    del(Agg)
+
+    dirsnames.restore_dir()
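The final combination is a weighted mean over return periods, comb = sum_T(vm_T * w_T) with sum_T(w_T) = 1, which the two assertions above enforce. A toy numeric check (return periods and weights invented for illustration):

```python
import numpy as np

# Weighted combination of per-return-period acceptability grids.
vm = {5:   np.array([[0.2]]),
      25:  np.array([[0.5]]),
      100: np.array([[0.9]])}
w = {5: 0.5, 25: 0.3, 100: 0.2}   # ponderations; must sum to 1

comb = np.zeros((1, 1))
for T in vm:
    comb += vm[T] * w[T]

print(comb)   # [[0.43]] = 0.2*0.5 + 0.5*0.3 + 0.9*0.2
```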