wolfhece 2.1.28__py3-none-any.whl → 2.1.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
wolfhece/PyDraw.py CHANGED
@@ -7489,9 +7489,9 @@ class WolfMapViewer(wx.Frame):
         self._set_active_bc()
 
         #Print info in the status bar
-        txt = 'Dx : {:4f} ; Dy : {:4f}'.format(self.active_array.dx, self.active_array.dy)
-        txt += ' ; Xmin : {:4f} ; Ymin : {:4f}'.format(self.active_array.origx, self.active_array.origy)
-        txt += ' ; Xmax : {:4f} ; Ymax : {:4f}'.format(self.active_array.origx + self.active_array.dx * float(self.active_array.nbx),
+        txt = 'Dx : {:.4f} ; Dy : {:.4f}'.format(self.active_array.dx, self.active_array.dy)
+        txt += ' ; Xmin : {:.4f} ; Ymin : {:.4f}'.format(self.active_array.origx, self.active_array.origy)
+        txt += ' ; Xmax : {:.4f} ; Ymax : {:.4f}'.format(self.active_array.origx + self.active_array.dx * float(self.active_array.nbx),
                                                          self.active_array.origy + self.active_array.dy * float(self.active_array.nby))
         txt += ' ; Nx : {:d} ; Ny : {:d}'.format(self.active_array.nbx, self.active_array.nby)
 
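Note on the fix above: in Python's format-spec mini-language, `{:4f}` means "minimum field width 4" with the default precision of 6, whereas `{:.4f}` means "precision 4". A quick check in a plain interpreter (independent of wolfhece):

    >>> '{:4f}'.format(3.14159)   # width 4, default precision 6
    '3.141590'
    >>> '{:.4f}'.format(3.14159)  # precision of 4 digits after the point
    '3.1416'

So the old status-bar text always showed six decimals; the new spec actually rounds to four.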
wolfhece/PyPalette.py CHANGED
@@ -333,6 +333,14 @@ class wolfpalette(wx.Frame,LinearSegmentedColormap):
             self.nb = i
             self.values=self.values[0:i]
             self.colors=self.colors[0:i,:]
+        else:
+            self.nb = i
+            oldvalues = self.values
+            oldcolors = self.colors
+            self.values = np.zeros(self.nb , dtype=float)
+            self.colors = np.zeros((self.nb,4) , dtype=int)
+            self.values[0:len(oldvalues)] = oldvalues
+            self.colors[0:len(oldcolors),:] = oldcolors
 
         update = False
 
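The new `else:` branch covers the case the old code silently ignored: growing the palette. The arrays are reallocated at the requested size and the existing entries copied in, leaving the trailing slots at zero. A standalone sketch of the same truncate-or-grow pattern (hypothetical helper; it assumes `values` is 1-D float and `colors` is N×4 int, as in the class):

    import numpy as np

    def resize_palette(values, colors, n):
        """Truncate or zero-pad a (values, colors) palette to n entries."""
        if n <= len(values):
            # shrink: keep the first n entries (the pre-existing branch)
            return values[:n], colors[:n, :]
        # grow: reallocate and copy, new slots stay at zero (the new branch)
        new_values = np.zeros(n, dtype=float)
        new_colors = np.zeros((n, 4), dtype=int)
        new_values[:len(values)] = values
        new_colors[:len(colors), :] = colors
        return new_values, new_colors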
@@ -1,6 +1,6 @@
 from .Parallels import parallel_gpd_clip, parallel_v2r, parallel_datamod
 from .func import data_modification, compute_vulnerability, compute_vulnerability4scenario
-from .func import match_vulnerability2sim, compute_acceptability, shp_to_raster
+from .func import match_vulnerability2sim, compute_acceptability, shp_to_raster, clip_layer
 from .func import Accept_Manager, cleaning_directory, EXTENT, Vulnerability_csv, compute_code
 
 import pandas as pd
@@ -38,6 +38,14 @@ class steps_vulnerability(Enum):
     APPLY_MODIFS = 2
     MATCH_SIMUL = 3
 
+class steps_acceptability(Enum):
+    """
+    Enum for the steps in the acceptability computation
+    """
+    COMPUTE_LOCAL_ACCEPT = 1
+    LOAD_FROM_FILES = 2
+    COMPUTE_MEAN_ACCEPT = 3
+
 def Base_data_creation(main_dir:str = 'Data',
                        Original_gdb:str = 'GT_Resilence_dataRisques202010.gdb',
                        Study_area:str = 'Bassin_Vesdre.shp',
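`steps_acceptability` mirrors the existing step enums, and the module tests each step as `1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps`. The double test is needed because plain `Enum` members do not compare equal to their integer values, so callers may pass either form. A minimal illustration (re-declaring the enum for a self-contained snippet):

    from enum import Enum

    class steps_acceptability(Enum):
        COMPUTE_LOCAL_ACCEPT = 1
        LOAD_FROM_FILES = 2
        COMPUTE_MEAN_ACCEPT = 3

    steps = [1, 3]
    # Plain Enum members are not equal to their values...
    assert steps_acceptability.COMPUTE_LOCAL_ACCEPT != 1
    # ...hence the code checks both the int form and the enum form:
    assert 1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps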
@@ -47,7 +55,7 @@ def Base_data_creation(main_dir:str = 'Data',
                        resolution:float = 1.,
                        number_procs:int = 8,
                        steps:list[int] | list[steps_base_data_creation] = [1,2,3,4,5,6,7],
-                       Vulnerability_csv:str = 'Vulnerability.csv'):
+                       Vuln_csv:str = 'Vulnerability.csv'):
     """
     Create the databse.
 
@@ -79,13 +87,12 @@ def Base_data_creation(main_dir:str = 'Data',
                             CaPa_Walloon=CaPa_Walloon,
                             PICC_Walloon=PICC_Walloon,
                             CE_IGN_top10v=CE_IGN_top10v,
-                            Vulnerability_csv=Vulnerability_csv)
+                            Vuln_csv=Vuln_csv)
 
     if not manager.check_before_database_creation():
         logging.error("The necessary files are missing - Verify logs for more information")
         return
 
-    manager.change_dir()
     done = []
 
     if 1 in steps or 6 in steps or steps_base_data_creation.PREPROCESS_VULNCODE in steps or steps_base_data_creation.CLIP_GDB in steps:
@@ -146,7 +153,9 @@ def Base_data_creation(main_dir:str = 'Data',
     if 5 in steps or steps_base_data_creation.RASTERIZE_IGN in steps:
         # ********************************************************************************************************************
         # Step 5 : Rasaterize the IGN data "Course d'eau" to get the riverbed mask
-        shp_to_raster(manager.CE_IGN_TOP10V, manager.SA_MASKED_RIVER, resolution)
+        LAYER_IGN = "CE_IGN_TOP10V"
+        clip_layer(layer=LAYER_IGN, file_path=manager.CE_IGN_TOP10V, Study_Area=manager.SA, output_dir=manager.TMP_IGNCE)
+        shp_to_raster(manager.TMP_IGNCE / (LAYER_IGN + '.gpkg'), manager.SA_MASKED_RIVER, resolution, manager=manager)
 
         done.append(steps_base_data_creation.RASTERIZE_IGN)
 
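Step 5 now clips the IGN layer to the study area before rasterizing it, rather than rasterizing the full layer directly. `clip_layer` and `shp_to_raster` are wolfhece helpers; for orientation only, here is a generic sketch of the same clip-then-rasterize idea using plain GDAL utilities (not wolfhece's implementation; all paths hypothetical):

    from osgeo import gdal

    # Clip the vector layer to the study-area polygon, writing a GeoPackage
    gdal.VectorTranslate('tmp/CE_IGN_TOP10V.gpkg', 'CE_IGN_TOP10V.shp',
                         clipSrc='Bassin_Vesdre.shp')

    # Burn the clipped features into a raster mask at the working resolution
    gdal.Rasterize('sa_masked_river.tif', 'tmp/CE_IGN_TOP10V.gpkg',
                   xRes=1.0, yRes=1.0, burnValues=[1],
                   outputType=gdal.GDT_Byte)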
@@ -204,19 +213,17 @@ def Base_data_creation(main_dir:str = 'Data',
                            Study_area,
                            resolution,
                            number_procs=number_procs,
-                           Vulnerability_csv=Vulnerability_csv)
+                           Vuln_csv=Vuln_csv)
 
         done.append(steps_base_data_creation.DATABASE_TO_RASTER)
 
-    manager.restore_dir()
-
     return done
 
 def Database_to_raster(main_dir:str = 'Data',
                        Study_area:str = 'Bassin_Vesdre.shp',
                        resolution:float = 1.,
                        number_procs:int = 16,
-                       Vulnerability_csv:str = 'Vulnerability.csv'):
+                       Vuln_csv:str = 'Vulnerability.csv'):
     """
     Convert the vector database to raster database based on their vulnerability values
 
@@ -235,7 +242,7 @@ def Database_to_raster(main_dir:str = 'Data',
     The parallel processing is safe as each layer is processed independently.
     """
 
-    manager = Accept_Manager(main_dir, Study_area, Vulnerability_csv=Vulnerability_csv)
+    manager = Accept_Manager(main_dir, Study_area, Vuln_csv=Vuln_csv)
 
     resolution = float(resolution)
 
@@ -243,22 +250,18 @@ def Database_to_raster(main_dir:str = 'Data',
         logging.error("The necessary files are missing - Verify logs for more information")
         return
 
-    manager.change_dir()
-
     logging.info("Convert vectors to raster based on their vulnerability values")
 
     attributes = ["Vulne", "Code"]
     for cur_attrib in attributes:
         parallel_v2r(manager, cur_attrib, resolution, number_procs, convert_to_sparse=True)
 
-    manager.restore_dir()
-
 def Vulnerability(main_dir:str = 'Data',
                   scenario:str = 'Scenario1',
                   Study_area:str = 'Bassin_Vesdre.shp',
                   resolution:float = 1.,
                   steps:list[int] | list[steps_vulnerability] = [1,10,11,2,3],
-                  Vulnerability_csv:str = 'Vulnerability.csv',
+                  Vuln_csv:str = 'Vulnerability.csv',
                   Intermediate_csv:str = 'Intermediate.csv'):
     """
     Compute the vulnerability for the study area and the scenario, if needed.
@@ -289,7 +292,7 @@ def Vulnerability(main_dir:str = 'Data',
     manager = Accept_Manager(main_dir,
                              Study_area,
                              scenario=scenario,
-                             Vulnerability_csv=Vulnerability_csv,
+                             Vuln_csv=Vuln_csv,
                              Intermediate_csv=Intermediate_csv)
 
     if not manager.check_before_vulnerability():
@@ -298,7 +301,6 @@ def Vulnerability(main_dir:str = 'Data',
 
     logging.info("Starting VULNERABILITY computations at {} m resolution".format(resolution))
 
-    manager.change_dir()
     done = []
 
     if 1 in steps or steps_vulnerability.CREATE_RASTERS in steps:
@@ -360,28 +362,26 @@ def Vulnerability(main_dir:str = 'Data',
 
         done.append(steps_vulnerability.MATCH_SIMUL)
 
-    manager.restore_dir()
-
     return done
 
 def Acceptability(main_dir:str = 'Vesdre',
                   scenario:str = 'Scenario1',
                   Study_area:str = 'Bassin_Vesdre.shp',
                   coeff_auto:bool = True,
-                  Ponderation_csv:str = 'Ponderation.csv'):
+                  Ponderation_csv:str = 'Ponderation.csv',
+                  resample_size:int = 100,
+                  steps:list[int] | list[steps_acceptability] = [1,2,3]):
     """ Compute acceptability for the scenario """
 
+    done = []
+
     manager = Accept_Manager(main_dir,
                              Study_area,
                              scenario=scenario,
                              Ponderation_csv=Ponderation_csv)
 
-    manager.change_dir()
-
     # Load the vulnerability raster **for the scenario**
     vulne = gdal.Open(str(manager.OUT_VULN))
-    # Convert to numpy array
-    vulne = vulne.GetRasterBand(1).ReadAsArray()
 
     # Load the river mask
     riv = gdal.Open(str(manager.OUT_MASKED_RIVER))
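With the new signature, `Acceptability` becomes restartable in the same way as the other entry points: the `steps` list selects the work to do, and the function returns the list of completed steps. A usage sketch (argument values taken from the defaults above):

    # ints and enum members are interchangeable in `steps`
    done = Acceptability(main_dir='Vesdre', scenario='Scenario1',
                         steps=[1, 3])  # compute locally, then combine
    # equivalently:
    done = Acceptability(main_dir='Vesdre', scenario='Scenario1',
                         steps=[steps_acceptability.COMPUTE_LOCAL_ACCEPT,
                                steps_acceptability.COMPUTE_MEAN_ACCEPT])

Skipping step 2 while running steps 1 and 3 works because step 1 already fills the `part_accept` dictionary that step 3 consumes.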
@@ -390,8 +390,12 @@ def Acceptability(main_dir:str = 'Vesdre',
     geotrans = riv.GetGeoTransform()
     proj = riv.GetProjection()
 
+    assert vulne.GetGeoTransform() == riv.GetGeoTransform(), "The geotransform of the two rasters is different"
+    assert vulne.GetProjection() == riv.GetProjection(), "The projection of the two rasters is different"
+
     # Convert to numpy array
-    riv = riv.GetRasterBand(1).ReadAsArray()
+    vulne = vulne.GetRasterBand(1).ReadAsArray()
+    riv = riv.GetRasterBand(1).ReadAsArray()
 
     # Get the return periods available
     return_periods = manager.get_return_periods()
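The new asserts compare dataset metadata before any pixels are read, which is why `ReadAsArray()` now moves below them. For reference, a GDAL geotransform is a 6-tuple of floats, so the check is an exact tuple comparison; rasters written by the same pipeline normally match bit-for-bit (path hypothetical):

    from osgeo import gdal

    ds = gdal.Open('vuln.tif')
    # (origin_x, pixel_width, row_rotation, origin_y, col_rotation, pixel_height)
    origin_x, dx, _, origin_y, _, dy = ds.GetGeoTransform()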
@@ -399,116 +403,140 @@ def Acceptability(main_dir:str = 'Vesdre',
     # Prepare the river bed filter
     # Useful as we iterate over the return periods
     # and the river bed is the same for all return periods
-    ij_riv = np.where(riv == 1)
+    ij_riv = np.argwhere(riv == 1)
 
-    # Compute acceptability for each return period
-    for curT in tqdm(return_periods):
-
-        # Load the **FILLED** modelled water depth for the return period
-        model_h = gdal.Open(str(manager.get_sim_file_for_return_period(curT)))
-        # Convert to numpy array
-        model_h = model_h.GetRasterBand(1).ReadAsArray()
-
-        # Set nan if the water depth is 0.
-        # 'nan' is a good choice as it will not be considered in the computation.
-        model_h[model_h == 0] = np.nan
-        # Set nan in the river bed
-        model_h[ij_riv] = np.nan
-
-        logging.info("Return period {}".format(curT))
-        # Compute the local acceptability for the return period
-        compute_acceptability(manager, model_h, vulne, curT, (geotrans, proj))
-
-    # At this point, the local acceptability for each return period is computed
-    # and stored in tiff files in the TEMP/SutyArea/scenario/Q_FILES directory
-
-    # Get the list of Q files
-    qs = manager.get_q_files()
     # Initialize the dictionary to store the acceptability values
-    q_dict = {}
+    part_accept = {}
 
-    # Iterate over the return periods
-    for curT in return_periods:
-        logging.info("vm"+str(curT))
+    if 1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps:
+        # Compute acceptability for each return period
+        for curT in tqdm(return_periods):
 
-        # We set the filename from the return period, not the "qs" list
-        q_filename = manager.TMP_QFILES / "Q{}.tif".format(curT)
-
-        # Check if the file exists
-        assert q_filename.exists(), "The file {} does not exist".format(q_filename)
-        # Check if the file is in the "qs" list
-        assert q_filename in qs, "The file {} is not in the list of Q files".format(q_filename)
-
-        # Load the Q file for the return period
-        tmp_data = gdal.Open(str(q_filename))
-        # Convert to numpy array
-        q_dict["vm"+str(curT)] = tmp_data.GetRasterBand(1).ReadAsArray()
-
-        # Force the deletion of the variable, rather than waiting for the garbage collector
-        # May be useful if the files are large
-        del tmp_data
-
-    # Pointing the last return period, maybe 1000 but not always
-    array_t1000 = q_dict["vm{}".format(return_periods[-1])]
-    # Get the indices where the value is -99999
-    # We will force the same mask for all lower return periods
-    ij_t1000 = np.where(array_t1000 == -99999)
-
-    # Iterate over the return periods
-    for curT in return_periods:
-
-        if curT != return_periods[-1]:
-            logging.info(curT)
-
-            # Alias
-            tmp_array = q_dict["vm{}".format(curT)]
-
-            # Set the -99999 values to 0
-            tmp_array[tmp_array == -99999] = 0.
-            # Set the masked values, for the last return period, to nan
-            tmp_array[ij_t1000] = np.nan
-
-    # # Load the ponderation file
-    # pond = pd.read_csv(dirsnames.PONDERATION_CSV)
-    # # Set the index to the interval, so we can use the interval as a key
-    # pond.set_index("Interval", inplace=True)
-
-    # Get ponderations for the return periods
-    if coeff_auto:
-        pond = manager.get_ponderations()
-        assert pond["Ponderation"].sum() > 0.999999 and pond["Ponderation"].sum()<1.0000001, "The sum of the ponderations is not equal to 1"
-
-    elif manager.is_valid_ponderation_csv:
-        pond = pd.read_csv(manager.PONDERATION_CSV)
-        pond.set_index("Interval", inplace=True)
-    else:
-        logging.error("The ponderation file is missing")
-        logging.info("Please provide the ponderation file or set 'coeff_auto' to True")
-        return
+            # Load the **FILLED** modelled water depth for the return period
+            model_h = gdal.Open(str(manager.get_sim_file_for_return_period(curT)))
+            # Convert to numpy array
+            model_h = model_h.GetRasterBand(1).ReadAsArray()
+
+            assert model_h.shape == vulne.shape, "The shape of the modelled water depth is different from the vulnerability raster"
+
+            # Set 0. if the water depth is 0.
+            model_h[model_h == 0] = 0
+            # Set 0. in the river bed
+            model_h[ij_riv[:,0], ij_riv[:,1]] = 0
+
+            assert model_h[ij_riv[0][0], ij_riv[0][1]] == 0, "The river bed is not set to 0 in the modelled water depth"
+            assert model_h.max() > 0, "The maximum water depth is 0"
+            if model_h.min() < 0:
+                logging.warning("The minimum water depth is negative - {} cells".format(np.count_nonzero(model_h<0)))
+                logging.warning("Setting the negative values to 0")
+                model_h[model_h < 0] = 0
+
+            logging.info("Return period {}".format(curT))
+
+            # Compute the local acceptability for the return period
+            part_accept[curT] = compute_acceptability(manager, model_h, vulne, curT, (geotrans, proj))
 
-    assert len(pond) == len(return_periods), "The number of ponderations is not equal to the number of return periods"
+        done.append(steps_acceptability.COMPUTE_LOCAL_ACCEPT)
 
-    # Initialize the combined acceptability matrix -- Ponderate mean of the local acceptability
-    comb = np.zeros(q_dict["vm{}".format(return_periods[-1])].shape)
+    # At this point, the local acceptability for each return period is computed
+    # and stored in tiff files in the TEMP/SutyArea/scenario/Q_FILES directory.
+    # The arrays are also stored in the part_accept dictionary.
+
+    if 2 in steps or steps_acceptability.LOAD_FROM_FILES in steps:
+        # Load/Reload the acceptability values from files
+
+        if 1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps:
+            # We have computed/updted the acceptibility values.
+            # We do not need to reload them.
+            logging.warning("The acceptability values have been computed in step 1 - avoid reloading")
+            logging.info("If you want to reload the acceptability values, please remove step 1 from the list of steps")
+        else:
 
-    for curT in return_periods:
-        comb += q_dict["vm{}".format(curT)] * pond["Ponderation"][curT]
+            # Get the list of Q files
+            qs = manager.get_q_files()
+
+            # Iterate over the return periods
+            for curT in return_periods:
+                logging.info(curT)
+
+                # We set the filename from the return period, not the "qs" list
+                q_filename = manager.TMP_QFILES / "Q{}.tif".format(curT)
+
+                # Check if the file exists
+                assert q_filename.exists(), "The file {} does not exist".format(q_filename)
+                # Check if the file is in the "qs" list
+                assert q_filename in qs, "The file {} is not in the list of Q files".format(q_filename)
+
+                # Load the Q file for the return period
+                tmp_data = gdal.Open(str(q_filename))
+                # Convert to numpy array
+                part_accept[curT] = tmp_data.GetRasterBand(1).ReadAsArray()
+
+        done.append(steps_acceptability.LOAD_FROM_FILES)
+
+    if 3 in steps or steps_acceptability.COMPUTE_MEAN_ACCEPT in steps:
+
+        assert len(part_accept) == len(return_periods), "The number of acceptability files is not equal to the number of return periods"
+
+        # Pointing the last return period, maybe 1000 but not always
+        array_tmax = part_accept[return_periods[-1]]
+
+        # Get ponderations for the return periods
+        if coeff_auto:
+            logging.info("Automatic ponderation")
+            pond = manager.get_ponderations()
+            assert pond["Ponderation"].sum() > 0.999999 and pond["Ponderation"].sum()<1.0000001, "The sum of the ponderations is not equal to 1"
+
+        elif manager.is_valid_ponderation_csv:
+            logging.info("Manual ponderation")
+            # Load the ponderation file
+            pond = pd.read_csv(manager.PONDERATION_CSV)
+            # Set the index to the interval, so we can use the interval as a key
+            pond.set_index("Interval", inplace=True)
+
+        else:
+            logging.error("The ponderation file is missing")
+            logging.info("Please provide the ponderation file or set 'coeff_auto' to True")
+            return -1
 
-    y_pixels, x_pixels = comb.shape # number of pixels in x
+        assert len(pond) == len(return_periods), "The number of ponderations is not equal to the number of return periods"
 
-    # Set up output GeoTIFF
-    driver = gdal.GetDriverByName('GTiff')
-    dataset = driver.Create(str(manager.OUT_ACCEPT), x_pixels, y_pixels, 1, gdal.GDT_Float32, options=["COMPRESS=LZW"])
-    dataset.GetRasterBand(1).WriteArray(comb.astype(np.float32))
-    dataset.SetGeoTransform(geotrans)
-    dataset.SetProjection(proj)
-    dataset.FlushCache()
-    del(dataset)
+        # Initialize the combined acceptability matrix -- Ponderate mean of the local acceptability
+        comb = np.zeros(part_accept[return_periods[-1]].shape, dtype=np.float32)
+
+        for curT in return_periods:
+            assert part_accept[curT].dtype == np.float32, "The dtype of the acceptability matrix is not np.float32"
+            assert part_accept[curT].shape == comb.shape, "The shape of the acceptability matrix is not the right one"
 
-    # Resample to 100m
-    Agg = gdal.Warp(str(manager.OUT_ACCEPT_100M), str(manager.OUT_ACCEPT), xRes=100, yRes=100, resampleAlg='Average')
-    del(Agg)
+            comb += part_accept[curT] * float(pond["Ponderation"][curT])
 
-    manager.restore_dir()
+        y_pixels, x_pixels = comb.shape # number of pixels in x
 
-    return 0
+        # Set up output GeoTIFF
+        driver = gdal.GetDriverByName('GTiff')
+        dataset = driver.Create(str(manager.OUT_ACCEPT),
+                                x_pixels, y_pixels,
+                                1,
+                                gdal.GDT_Float32,
+                                options=["COMPRESS=LZW"])
+
+        assert comb.dtype == np.float32, "The dtype of the combined acceptability matrix is not np.float32"
+
+        dataset.GetRasterBand(1).WriteArray(comb)
+        dataset.SetGeoTransform(geotrans)
+        dataset.SetProjection(proj)
+        dataset.FlushCache()
+        dataset=None
+
+        # Resample to XXm
+        Agg = gdal.Warp(str(manager.OUT_ACCEPT_100M),
+                        str(manager.OUT_ACCEPT),
+                        xRes=resample_size,
+                        yRes=resample_size,
+                        resampleAlg='Average')
+        Agg.FlushCache()
+        Agg = None
+
+        done.append(steps_acceptability.COMPUTE_MEAN_ACCEPT)
+
+    return done
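Three notes on the rewritten body above. First, the `np.where` → `np.argwhere` switch changes the shape of the index container: `np.where` returns a tuple of per-axis index arrays that can be used directly for fancy indexing, while `np.argwhere` returns an (N, 2) array of row/column pairs, which is why the new code indexes with `ij_riv[:,0], ij_riv[:,1]`:

    import numpy as np

    riv = np.array([[0, 1],
                    [1, 0]])
    ij_tuple = np.where(riv == 1)     # (array([0, 1]), array([1, 0]))
    ij_pairs = np.argwhere(riv == 1)  # array([[0, 1], [1, 0]])

    h = np.ones_like(riv, dtype=float)
    h[ij_tuple] = 0.0                        # a tuple indexes directly
    h[ij_pairs[:, 0], ij_pairs[:, 1]] = 0.0  # pairs must be split per axis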
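Second, step 3 is a weighted ("ponderated") mean: each per-return-period acceptability raster is scaled by its weight and accumulated, with the weights asserted to sum to 1. A toy version of the combination (synthetic data; the `pond` dict stands in for the "Ponderation" column indexed by return period):

    import numpy as np

    return_periods = [25, 50, 100]
    pond = {25: 0.5, 50: 0.3, 100: 0.2}  # weights sum to 1
    part_accept = {T: np.full((4, 4), T / 100, dtype=np.float32)
                   for T in return_periods}

    comb = np.zeros(part_accept[return_periods[-1]].shape, dtype=np.float32)
    for T in return_periods:
        comb += part_accept[T] * float(pond[T])  # ponderated mean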
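Third, the output path keeps the usual GDAL idiom, with two behavioural fixes folded in: the dataset is released with `= None` (the supported way to close and flush a GDAL dataset from Python, replacing `del`), and the resampling resolution now follows the `resample_size` argument instead of a hard-coded 100 m. A self-contained sketch of that write-then-aggregate pattern (paths, geotransform, and projection hypothetical):

    from osgeo import gdal
    import numpy as np

    comb = np.random.rand(200, 300).astype(np.float32)   # stand-in result
    geotrans = (252000.0, 1.0, 0.0, 141000.0, 0.0, -1.0) # hypothetical
    proj = ''                                            # hypothetical WKT

    driver = gdal.GetDriverByName('GTiff')
    ds = driver.Create('accept.tif', comb.shape[1], comb.shape[0],
                       1, gdal.GDT_Float32, options=["COMPRESS=LZW"])
    ds.GetRasterBand(1).WriteArray(comb)
    ds.SetGeoTransform(geotrans)
    ds.SetProjection(proj)
    ds.FlushCache()
    ds = None  # closes the dataset and guarantees the file is written

    # Aggregate to a coarser grid; 'Average' takes the mean of source cells
    agg = gdal.Warp('accept_100m.tif', 'accept.tif',
                    xRes=100, yRes=100, resampleAlg='Average')
    agg = None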