wolfhece 2.1.29__py3-none-any.whl → 2.1.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
wolfhece/acceptability/acceptability.py CHANGED
@@ -38,6 +38,14 @@ class steps_vulnerability(Enum):
      APPLY_MODIFS = 2
      MATCH_SIMUL = 3
 
+ class steps_acceptability(Enum):
+     """
+     Enum for the steps in the acceptability computation
+     """
+     COMPUTE_LOCAL_ACCEPT = 1
+     LOAD_FROM_FILES = 2
+     COMPUTE_MEAN_ACCEPT = 3
+
  def Base_data_creation(main_dir:str = 'Data',
                         Original_gdb:str = 'GT_Resilence_dataRisques202010.gdb',
                         Study_area:str = 'Bassin_Vesdre.shp',
@@ -85,7 +93,6 @@ def Base_data_creation(main_dir:str = 'Data',
          logging.error("The necessary files are missing - Verify logs for more information")
          return
 
-     manager.change_dir()
      done = []
 
      if 1 in steps or 6 in steps or steps_base_data_creation.PREPROCESS_VULNCODE in steps or steps_base_data_creation.CLIP_GDB in steps:
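The step dispatch above accepts plain integers and `steps_base_data_creation` members interchangeably. A minimal sketch of both call styles, assuming `Base_data_creation` and the enum are importable from this module and that `steps` is a keyword parameter, as its use in the body suggests:

    from wolfhece.acceptability.acceptability import Base_data_creation, steps_base_data_creation

    # Integer style: request step 1 only.
    Base_data_creation(main_dir='Data', steps=[1])
    # Enum style: more readable in scripts; CLIP_GDB is one of the members
    # tested in the condition shown above.
    Base_data_creation(main_dir='Data', steps=[steps_base_data_creation.CLIP_GDB])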
@@ -210,8 +217,6 @@ def Base_data_creation(main_dir:str = 'Data',
 
      done.append(steps_base_data_creation.DATABASE_TO_RASTER)
 
-     manager.restore_dir()
-
      return done
 
  def Database_to_raster(main_dir:str = 'Data',
@@ -245,16 +250,12 @@ def Database_to_raster(main_dir:str = 'Data',
          logging.error("The necessary files are missing - Verify logs for more information")
          return
 
-     manager.change_dir()
-
      logging.info("Convert vectors to raster based on their vulnerability values")
 
      attributes = ["Vulne", "Code"]
      for cur_attrib in attributes:
          parallel_v2r(manager, cur_attrib, resolution, number_procs, convert_to_sparse=True)
 
-     manager.restore_dir()
-
  def Vulnerability(main_dir:str = 'Data',
                    scenario:str = 'Scenario1',
                    Study_area:str = 'Bassin_Vesdre.shp',
@@ -300,7 +301,6 @@ def Vulnerability(main_dir:str = 'Data',
 
      logging.info("Starting VULNERABILITY computations at {} m resolution".format(resolution))
 
-     manager.change_dir()
      done = []
 
      if 1 in steps or steps_vulnerability.CREATE_RASTERS in steps:
@@ -362,24 +362,24 @@ def Vulnerability(main_dir:str = 'Data',
 
      done.append(steps_vulnerability.MATCH_SIMUL)
 
-     manager.restore_dir()
-
      return done
 
  def Acceptability(main_dir:str = 'Vesdre',
                    scenario:str = 'Scenario1',
                    Study_area:str = 'Bassin_Vesdre.shp',
                    coeff_auto:bool = True,
-                   Ponderation_csv:str = 'Ponderation.csv'):
+                   Ponderation_csv:str = 'Ponderation.csv',
+                   resample_size:int = 100,
+                   steps:list[int] | list[steps_acceptability] = [1,2,3]):
      """ Compute acceptability for the scenario """
 
+     done = []
+
      manager = Accept_Manager(main_dir,
                               Study_area,
                               scenario=scenario,
                               Ponderation_csv=Ponderation_csv)
 
-     manager.change_dir()
-
      # Load the vulnerability raster **for the scenario**
      vulne = gdal.Open(str(manager.OUT_VULN))
 
@@ -395,7 +395,7 @@ def Acceptability(main_dir:str = 'Vesdre',
 
      # Convert to numpy array
      vulne = vulne.GetRasterBand(1).ReadAsArray()
-     riv = riv.GetRasterBand(1).ReadAsArray()
+     riv = riv.GetRasterBand(1).ReadAsArray()
 
      # Get the return periods available
      return_periods = manager.get_return_periods()
@@ -403,116 +403,140 @@ def Acceptability(main_dir:str = 'Vesdre',
      # Prepare the river bed filter
      # Useful as we iterate over the return periods
      # and the river bed is the same for all return periods
-     ij_riv = np.where(riv == 1)
-
-     # Compute acceptability for each return period
-     for curT in tqdm(return_periods):
-
-         # Load the **FILLED** modelled water depth for the return period
-         model_h = gdal.Open(str(manager.get_sim_file_for_return_period(curT)))
-         # Convert to numpy array
-         model_h = model_h.GetRasterBand(1).ReadAsArray()
-
-         # Set nan if the water depth is 0.
-         # 'nan' is a good choice as it will not be considered in the computation.
-         model_h[model_h == 0] = np.nan
-         # Set nan in the river bed
-         model_h[ij_riv] = np.nan
-
-         logging.info("Return period {}".format(curT))
-         # Compute the local acceptability for the return period
-         compute_acceptability(manager, model_h, vulne, curT, (geotrans, proj))
-
-     # At this point, the local acceptability for each return period is computed
-     # and stored in tiff files in the TEMP/StudyArea/scenario/Q_FILES directory
+     ij_riv = np.argwhere(riv == 1)
 
-     # Get the list of Q files
-     qs = manager.get_q_files()
      # Initialize the dictionary to store the acceptability values
-     q_dict = {}
+     part_accept = {}
 
-     # Iterate over the return periods
-     for curT in return_periods:
-         logging.info("vm"+str(curT))
+     if 1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps:
+         # Compute acceptability for each return period
+         for curT in tqdm(return_periods):
 
-         # We set the filename from the return period, not the "qs" list
-         q_filename = manager.TMP_QFILES / "Q{}.tif".format(curT)
-
-         # Check if the file exists
-         assert q_filename.exists(), "The file {} does not exist".format(q_filename)
-         # Check if the file is in the "qs" list
-         assert q_filename in qs, "The file {} is not in the list of Q files".format(q_filename)
-
-         # Load the Q file for the return period
-         tmp_data = gdal.Open(str(q_filename))
-         # Convert to numpy array
-         q_dict["vm"+str(curT)] = tmp_data.GetRasterBand(1).ReadAsArray()
-
-         # Force the deletion of the variable, rather than waiting for the garbage collector
-         # May be useful if the files are large
-         del tmp_data
-
-     # Pointing the last return period, maybe 1000 but not always
-     array_t1000 = q_dict["vm{}".format(return_periods[-1])]
-     # Get the indices where the value is -99999
-     # We will force the same mask for all lower return periods
-     ij_t1000 = np.where(array_t1000 == -99999)
-
-     # Iterate over the return periods
-     for curT in return_periods:
-
-         if curT != return_periods[-1]:
-             logging.info(curT)
-
-             # Alias
-             tmp_array = q_dict["vm{}".format(curT)]
-
-             # Set the -99999 values to 0
-             tmp_array[tmp_array == -99999] = 0.
-             # Set the masked values, for the last return period, to nan
-             tmp_array[ij_t1000] = np.nan
-
-     # # Load the ponderation file
-     # pond = pd.read_csv(dirsnames.PONDERATION_CSV)
-     # # Set the index to the interval, so we can use the interval as a key
-     # pond.set_index("Interval", inplace=True)
-
-     # Get ponderations for the return periods
-     if coeff_auto:
-         pond = manager.get_ponderations()
-         assert pond["Ponderation"].sum() > 0.999999 and pond["Ponderation"].sum()<1.0000001, "The sum of the ponderations is not equal to 1"
-
-     elif manager.is_valid_ponderation_csv:
-         pond = pd.read_csv(manager.PONDERATION_CSV)
-         pond.set_index("Interval", inplace=True)
-     else:
-         logging.error("The ponderation file is missing")
-         logging.info("Please provide the ponderation file or set 'coeff_auto' to True")
-         return
+             # Load the **FILLED** modelled water depth for the return period
+             model_h = gdal.Open(str(manager.get_sim_file_for_return_period(curT)))
+             # Convert to numpy array
+             model_h = model_h.GetRasterBand(1).ReadAsArray()
+
+             assert model_h.shape == vulne.shape, "The shape of the modelled water depth is different from the vulnerability raster"
+
+             # Set 0. if the water depth is 0.
+             model_h[model_h == 0] = 0
+             # Set 0. in the river bed
+             model_h[ij_riv[:,0], ij_riv[:,1]] = 0
+
+             assert model_h[ij_riv[0][0], ij_riv[0][1]] == 0, "The river bed is not set to 0 in the modelled water depth"
+             assert model_h.max() > 0, "The maximum water depth is 0"
+             if model_h.min() < 0:
+                 logging.warning("The minimum water depth is negative - {} cells".format(np.count_nonzero(model_h<0)))
+                 logging.warning("Setting the negative values to 0")
+                 model_h[model_h < 0] = 0
+
+             logging.info("Return period {}".format(curT))
+
+             # Compute the local acceptability for the return period
+             part_accept[curT] = compute_acceptability(manager, model_h, vulne, curT, (geotrans, proj))
+
+         done.append(steps_acceptability.COMPUTE_LOCAL_ACCEPT)
 
-     assert len(pond) == len(return_periods), "The number of ponderations is not equal to the number of return periods"
+     # At this point, the local acceptability for each return period is computed
+     # and stored in tiff files in the TEMP/StudyArea/scenario/Q_FILES directory.
+     # The arrays are also stored in the part_accept dictionary.
 
-     # Initialize the combined acceptability matrix -- Ponderate mean of the local acceptability
-     comb = np.zeros(q_dict["vm{}".format(return_periods[-1])].shape)
+     if 2 in steps or steps_acceptability.LOAD_FROM_FILES in steps:
+         # Load/Reload the acceptability values from files
+
+         if 1 in steps or steps_acceptability.COMPUTE_LOCAL_ACCEPT in steps:
+             # We have computed/updated the acceptability values.
+             # We do not need to reload them.
+             logging.warning("The acceptability values have been computed in step 1 - avoid reloading")
+             logging.info("If you want to reload the acceptability values, please remove step 1 from the list of steps")
+         else:
 
-     for curT in return_periods:
-         comb += q_dict["vm{}".format(curT)] * pond["Ponderation"][curT]
+             # Get the list of Q files
+             qs = manager.get_q_files()
+
+             # Iterate over the return periods
+             for curT in return_periods:
+                 logging.info(curT)
+
+                 # We set the filename from the return period, not the "qs" list
+                 q_filename = manager.TMP_QFILES / "Q{}.tif".format(curT)
+
+                 # Check if the file exists
+                 assert q_filename.exists(), "The file {} does not exist".format(q_filename)
+                 # Check if the file is in the "qs" list
+                 assert q_filename in qs, "The file {} is not in the list of Q files".format(q_filename)
+
+                 # Load the Q file for the return period
+                 tmp_data = gdal.Open(str(q_filename))
+                 # Convert to numpy array
+                 part_accept[curT] = tmp_data.GetRasterBand(1).ReadAsArray()
+
+         done.append(steps_acceptability.LOAD_FROM_FILES)
+
+     if 3 in steps or steps_acceptability.COMPUTE_MEAN_ACCEPT in steps:
+
+         assert len(part_accept) == len(return_periods), "The number of acceptability files is not equal to the number of return periods"
+
+         # Pointing the last return period, maybe 1000 but not always
+         array_tmax = part_accept[return_periods[-1]]
+
+         # Get ponderations for the return periods
+         if coeff_auto:
+             logging.info("Automatic ponderation")
+             pond = manager.get_ponderations()
+             assert pond["Ponderation"].sum() > 0.999999 and pond["Ponderation"].sum()<1.0000001, "The sum of the ponderations is not equal to 1"
+
+         elif manager.is_valid_ponderation_csv:
+             logging.info("Manual ponderation")
+             # Load the ponderation file
+             pond = pd.read_csv(manager.PONDERATION_CSV)
+             # Set the index to the interval, so we can use the interval as a key
+             pond.set_index("Interval", inplace=True)
+
+         else:
+             logging.error("The ponderation file is missing")
+             logging.info("Please provide the ponderation file or set 'coeff_auto' to True")
+             return -1
 
-     y_pixels, x_pixels = comb.shape # number of pixels in x
+         assert len(pond) == len(return_periods), "The number of ponderations is not equal to the number of return periods"
 
-     # Set up output GeoTIFF
-     driver = gdal.GetDriverByName('GTiff')
-     dataset = driver.Create(str(manager.OUT_ACCEPT), x_pixels, y_pixels, 1, gdal.GDT_Float32, options=["COMPRESS=LZW"])
-     dataset.GetRasterBand(1).WriteArray(comb.astype(np.float32))
-     dataset.SetGeoTransform(geotrans)
-     dataset.SetProjection(proj)
-     dataset.FlushCache()
-     del(dataset)
+         # Initialize the combined acceptability matrix -- Ponderate mean of the local acceptability
+         comb = np.zeros(part_accept[return_periods[-1]].shape, dtype=np.float32)
+
+         for curT in return_periods:
+             assert part_accept[curT].dtype == np.float32, "The dtype of the acceptability matrix is not np.float32"
+             assert part_accept[curT].shape == comb.shape, "The shape of the acceptability matrix is not the right one"
 
-     # Resample to 100m
-     Agg = gdal.Warp(str(manager.OUT_ACCEPT_100M), str(manager.OUT_ACCEPT), xRes=100, yRes=100, resampleAlg='Average')
-     del(Agg)
+             comb += part_accept[curT] * float(pond["Ponderation"][curT])
 
-     manager.restore_dir()
+         y_pixels, x_pixels = comb.shape # number of pixels in x
 
-     return 0
+         # Set up output GeoTIFF
+         driver = gdal.GetDriverByName('GTiff')
+         dataset = driver.Create(str(manager.OUT_ACCEPT),
+                                 x_pixels, y_pixels,
+                                 1,
+                                 gdal.GDT_Float32,
+                                 options=["COMPRESS=LZW"])
+
+         assert comb.dtype == np.float32, "The dtype of the combined acceptability matrix is not np.float32"
+
+         dataset.GetRasterBand(1).WriteArray(comb)
+         dataset.SetGeoTransform(geotrans)
+         dataset.SetProjection(proj)
+         dataset.FlushCache()
+         dataset = None
+
+         # Resample to XXm
+         Agg = gdal.Warp(str(manager.OUT_ACCEPT_100M),
+                         str(manager.OUT_ACCEPT),
+                         xRes=resample_size,
+                         yRes=resample_size,
+                         resampleAlg='Average')
+         Agg.FlushCache()
+         Agg = None
+
+         done.append(steps_acceptability.COMPUTE_MEAN_ACCEPT)
+
+     return done
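The `steps` machinery above makes the function restartable: step 1 computes and caches the per-return-period rasters, step 2 reloads them from the Q{T}.tif files when step 1 was skipped, and step 3 combines them with the ponderation weights and writes the (optionally resampled) result. A usage sketch, assuming the import path suggested by the wheel's RECORD and the default directory layout:

    from wolfhece.acceptability.acceptability import Acceptability, steps_acceptability

    # One-shot run: local acceptabilities plus the weighted mean, resampled to 50 m.
    done = Acceptability(main_dir='Vesdre', scenario='Scenario1',
                         steps=[1, 3], resample_size=50)

    # Later run: reuse the stored Q files instead of recomputing step 1.
    done = Acceptability(main_dir='Vesdre', scenario='Scenario1',
                         steps=[steps_acceptability.LOAD_FROM_FILES,
                                steps_acceptability.COMPUTE_MEAN_ACCEPT])
    assert steps_acceptability.COMPUTE_MEAN_ACCEPT in done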
wolfhece/acceptability/func.py CHANGED
@@ -1328,9 +1328,9 @@ def compute_vulnerability4scenario(manager:Accept_Manager):
          array_mod = gdal.Open(str(curfile))
          array_mod = np.array(array_mod.GetRasterBand(1).ReadAsArray())
 
-         ij = np.where(array_mod == 1)
-         array_vuln[ij] = 1
-         array_code[ij] = 1
+         ij = np.argwhere(array_mod == 1)
+         array_vuln[ij[:,0], ij[:,1]] = 1
+         array_code[ij[:,0], ij[:,1]] = 1
 
      dst_filename= str(manager.TMP_VULN)
      y_pixels, x_pixels = array_vuln.shape # number of pixels in x
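The `np.where` → `np.argwhere` switch here (and in `Acceptability` above) does not change which cells are written: `np.where` yields a tuple of per-axis index arrays that NumPy accepts directly as fancy indices, while `np.argwhere` yields a single (N, 2) array whose columns must be passed separately. A standalone equivalence check on a toy array (not from the package):

    import numpy as np

    array_mod = np.array([[0, 1], [1, 0]])
    a = np.zeros_like(array_mod)
    b = np.zeros_like(array_mod)

    ij_tuple = np.where(array_mod == 1)      # (row_indices, col_indices)
    ij_pairs = np.argwhere(array_mod == 1)   # shape (N, 2), one [row, col] per match

    a[ij_tuple] = 1
    b[ij_pairs[:, 0], ij_pairs[:, 1]] = 1
    assert (a == b).all()

The (N, 2) layout is also what the new Numba kernel further down iterates over, which is the likely motivation for the change.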
@@ -1382,11 +1382,20 @@ def match_vulnerability2sim(inRas:Path, outRas:Path, MODREC:Path):
      ds = gdal.Translate(outRas, ds, projWin = [minx, maxy, maxx, miny])
      ds = None
 
+
+ @nb.jit(nopython=True, boundscheck=False, inline='always')
+ def update_accept(accept, model_h, ij, bounds, loc_accept):
+     for idx in range(len(bounds)):
+         for i,j in ij:
+             if bounds[idx,0] < model_h[i,j] <= bounds[idx,1]:
+                 accept[i,j] = loc_accept[idx]
+
  def compute_acceptability(manager:Accept_Manager,
                            model_h:np.ndarray,
                            vulnerability:np.ndarray,
                            interval:int,
-                           geo_projection):
+                           geo_projection:tuple,
+                           save_to_file:bool=True) -> np.ndarray:
 
      """
      Compute the local acceptability based on :
@@ -1394,7 +1403,7 @@ def compute_acceptability(manager:Accept_Manager,
      - the water depth
      - the matrices
 
-     :param dirsnames: the Dirs_Names object from the calling function
+     :param manager: the Accept_Manager object from the calling function
      :param model_h: the water depth matrix
      :param vulnerability: the vulnerability matrix
      :param interval: the return period
@@ -1404,48 +1413,48 @@ def compute_acceptability(manager:Accept_Manager,
 
      logging.info(interval)
 
-     Qfile = pd.read_csv(manager.POINTS_CSV)
+     points_accept = pd.read_csv(manager.POINTS_CSV)
 
-     Qfile = Qfile[Qfile["Interval"]==interval]
-     Qfile = Qfile.reset_index()
+     points_accept = points_accept[points_accept["Interval"]==interval]
+     points_accept = points_accept.reset_index()
 
-     x,y = vulnerability.shape
-     accept = np.zeros((x,y))
+     accept = np.zeros(vulnerability.shape, dtype=np.float32)
 
-     ij_1 = np.where(vulnerability == 1)
-     ij_2 = np.where(vulnerability == 2)
-     ij_3 = np.where(vulnerability == 3)
-     ij_4 = np.where(vulnerability == 4)
-     ij_5 = np.where(vulnerability == 5)
+     bounds = np.asarray([[0., 0.02], [0.02, 0.3], [0.3, 1], [1, 2.5], [2.5, 1000]], dtype=np.float32)
 
-     bounds = [(0., 0.02), (0.02, 0.3), (0.3, 1), (1, 2.5), (2.5, 1000)]
+     for i in range(1,6):
+         ij = np.argwhere(vulnerability == i)
 
-     accept_1 = [Qfile["h-0"][4], Qfile["h-0.02"][4], Qfile["h-0.3"][4], Qfile["h-1"][4], Qfile["h-2.5"][4]]
-     accept_2 = [Qfile["h-0"][3], Qfile["h-0.02"][3], Qfile["h-0.3"][3], Qfile["h-1"][3], Qfile["h-2.5"][3]]
-     accept_3 = [Qfile["h-0"][2], Qfile["h-0.02"][2], Qfile["h-0.3"][2], Qfile["h-1"][2], Qfile["h-2.5"][2]]
-     accept_4 = [Qfile["h-0"][1], Qfile["h-0.02"][1], Qfile["h-0.3"][1], Qfile["h-1"][1], Qfile["h-2.5"][1]]
-     accept_5 = [Qfile["h-0"][0], Qfile["h-0.02"][0], Qfile["h-0.3"][0], Qfile["h-1"][0], Qfile["h-2.5"][0]]
+         idx_pts = 5-i
+         accept_pts = [points_accept["h-0"][idx_pts],
+                       points_accept["h-0.02"][idx_pts],
+                       points_accept["h-0.3"][idx_pts],
+                       points_accept["h-1"][idx_pts],
+                       points_accept["h-2.5"][idx_pts]]
+
+         update_accept(accept, model_h, ij, bounds, accept_pts)
 
-     accept[:,:] = -99999
-     for ij, loc_accept in zip([ij_1, ij_2, ij_3, ij_4, ij_5], [accept_1, accept_2, accept_3, accept_4, accept_5]):
-         if len(ij[0]) > 0:
-             for idx, (min_bound, max_bound) in enumerate(bounds):
-                 loc_ij = np.where((model_h[ij] > min_bound) & (model_h[ij] <= max_bound))
-                 accept[ij[0][loc_ij], ij[1][loc_ij]] = loc_accept[idx]
+     if save_to_file:
+         #save raster
+         dst_filename = str(manager.TMP_QFILES / "Q{}.tif".format(interval))
 
-     #save raster
-     dst_filename = str(manager.TMP_QFILES / "Q{}.tif".format(interval))
+         y_pixels, x_pixels = accept.shape # number of pixels in x
+         driver = gdal.GetDriverByName('GTiff')
+         dataset = driver.Create(dst_filename,
+                                 x_pixels, y_pixels,
+                                 1,
+                                 gdal.GDT_Float32,
+                                 options=["COMPRESS=LZW"])
 
-     y_pixels, x_pixels = accept.shape # number of pixels in x
-     driver = gdal.GetDriverByName('GTiff')
-     dataset = driver.Create(dst_filename, x_pixels, y_pixels, 1, gdal.GDT_Float32, options=["COMPRESS=LZW"])
-     dataset.GetRasterBand(1).WriteArray(accept.astype(np.float32))
+         dataset.GetRasterBand(1).WriteArray(accept.astype(np.float32))
 
-     geotrans, proj = geo_projection
-     dataset.SetGeoTransform(geotrans)
-     dataset.SetProjection(proj)
-     dataset.FlushCache()
-     dataset = None
+         geotrans, proj = geo_projection
+         dataset.SetGeoTransform(geotrans)
+         dataset.SetProjection(proj)
+         dataset.FlushCache()
+         dataset = None
+
+     return accept
 
  def shp_to_raster(vector_fn:str, raster_fn:str, pixel_size:float = 1., manager:Accept_Manager = None):
      """
wolfhece/apps/version.py CHANGED
@@ -5,7 +5,7 @@ class WolfVersion():
 
          self.major = 2
          self.minor = 1
-         self.patch = 29
+         self.patch = 30
 
      def __str__(self):
 
wolfhece/libs/WolfOGL.c CHANGED
@@ -20436,7 +20436,7 @@ static PyObject *__pyx_f_7wolfogl_mapColor(float __pyx_v_zValue, __Pyx_memviewsl
   * if zValue <= colorValues[0]:
   * return colorPalette[0,:] # <<<<<<<<<<<<<<
   * elif (zValue >= colorValues[paletteSize-1]):
-  * return colorPalette[-1,:]
+  * return colorPalette[paletteSize-1,:]
   */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3.data = __pyx_v_colorPalette.data;
@@ -20473,7 +20473,7 @@ __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_t_3, 1, (PyObject *(*)(char *)) __p
   * if zValue <= colorValues[0]:
   * return colorPalette[0,:]
   * elif (zValue >= colorValues[paletteSize-1]): # <<<<<<<<<<<<<<
-  * return colorPalette[-1,:]
+  * return colorPalette[paletteSize-1,:]
   * else:
   */
  __pyx_t_1 = (__pyx_v_paletteSize - 1);
@@ -20483,7 +20483,7 @@ __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_t_3, 1, (PyObject *(*)(char *)) __p
  /* "WolfOGL.pyx":1585
   * return colorPalette[0,:]
   * elif (zValue >= colorValues[paletteSize-1]):
-  * return colorPalette[-1,:] # <<<<<<<<<<<<<<
+  * return colorPalette[paletteSize-1,:] # <<<<<<<<<<<<<<
   * else:
   * for i in range(1,paletteSize):
   */
@@ -20492,7 +20492,7 @@ __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_t_3, 1, (PyObject *(*)(char *)) __p
  __pyx_t_3.memview = __pyx_v_colorPalette.memview;
  __PYX_INC_MEMVIEW(&__pyx_t_3, 1);
  {
-     Py_ssize_t __pyx_tmp_idx = -1L;
+     Py_ssize_t __pyx_tmp_idx = (__pyx_v_paletteSize - 1);
      Py_ssize_t __pyx_tmp_stride = __pyx_v_colorPalette.strides[0];
      __pyx_t_3.data += __pyx_tmp_idx * __pyx_tmp_stride;
  }
@@ -20513,13 +20513,13 @@ __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_t_3, 1, (PyObject *(*)(char *)) __p
   * if zValue <= colorValues[0]:
   * return colorPalette[0,:]
   * elif (zValue >= colorValues[paletteSize-1]): # <<<<<<<<<<<<<<
-  * return colorPalette[-1,:]
+  * return colorPalette[paletteSize-1,:]
   * else:
   */
  }
 
  /* "WolfOGL.pyx":1587
-  * return colorPalette[-1,:]
+  * return colorPalette[paletteSize-1,:]
   * else:
   * for i in range(1,paletteSize): # <<<<<<<<<<<<<<
   * if zValue <= colorValues[i]:
@@ -20756,7 +20756,7 @@ __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_t_3, 1, (PyObject *(*)(char *)) __p
  }
 
  /* "WolfOGL.pyx":1587
-  * return colorPalette[-1,:]
+  * return colorPalette[paletteSize-1,:]
   * else:
   * for i in range(1,paletteSize): # <<<<<<<<<<<<<<
   * if zValue <= colorValues[i]:
wolfhece/libs/WolfOGL.pyx CHANGED
@@ -1582,7 +1582,7 @@ cpdef mapColor(float zValue, double[:] colorValues, int paletteSize, double[:,:]
      if zValue <= colorValues[0]:
          return colorPalette[0,:]
      elif (zValue >= colorValues[paletteSize-1]):
-         return colorPalette[-1,:]
+         return colorPalette[paletteSize-1,:]
      else:
          for i in range(1,paletteSize):
              if zValue <= colorValues[i]:
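The `mapColor` fix addresses typed-memoryview indexing: as the generated C above shows (`Py_ssize_t __pyx_tmp_idx = -1L;`), the old `colorPalette[-1,:]` was applied as a raw offset of minus one stride, i.e. a read before the start of the palette buffer, rather than Python-style wraparound to the last row. Indexing with `paletteSize-1` selects the intended last row explicitly. A plain-NumPy illustration of the intended semantics, with a hypothetical palette:

    import numpy as np

    colorPalette = np.array([[0., 0., 1.], [0., 1., 0.], [1., 0., 0.]])
    paletteSize = colorPalette.shape[0]
    # In NumPy (and plain Python) the two spellings select the same row...
    assert (colorPalette[paletteSize - 1, :] == colorPalette[-1, :]).all()
    # ...but the compiled memoryview access used -1 directly as the row offset.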
wolfhece/libs/wolfogl.cp310-win_amd64.pyd CHANGED
Binary file
wolfhece-2.1.29.dist-info/METADATA → wolfhece-2.1.30.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: wolfhece
- Version: 2.1.29
+ Version: 2.1.30
  Author-email: Pierre Archambeau <pierre.archambeau@uliege.be>
  License: AGPL-v3 License
  Project-URL: Homepage, https://uee.uliege.be/hece
wolfhece-2.1.29.dist-info/RECORD → wolfhece-2.1.30.dist-info/RECORD
@@ -58,11 +58,10 @@ wolfhece/wolfresults_2D.py,sha256=wF-wIyqpTrUJX_fT-QCVuNxLZCgUsqK9ptGz8izpyIQ,16
  wolfhece/xyz_file.py,sha256=aQOcTHkHRhXHxL_WxTHwzygp6e47San7SHSpxKQU0dw,5457
  wolfhece/acceptability/Parallels.py,sha256=wpCdwkqR6PAFeRkV5TvSSL33Vf368j-bvYcl7D1Y-sc,3695
  wolfhece/acceptability/__init__.py,sha256=hfgoPKLDpX7drN1Vpvux-_5Lfyc_7feT2C2zQr5v-Os,258
- wolfhece/acceptability/acceptability.py,sha256=usCGc5Yn9zqA9RxZDxCL957_SOEbLVfjAqZ7EBCoi0A,22184
- wolfhece/acceptability/acceptability1.py,sha256=rf1Bu2JuyOPwMxvez7z5vCXrePAV486hyVM5g1f40g4,13045
+ wolfhece/acceptability/acceptability.py,sha256=xhLwqVqUjlBm29yQK23cr9pr87Z5PB3Lzr1Kw8DA5D4,24145
  wolfhece/acceptability/acceptability_gui.py,sha256=zzbHd_e90fLhbgrdBlnWmBWBO8ZBwb8vikhl-2Rdy0M,12020
  wolfhece/acceptability/cli.py,sha256=pIh9hIbM5RQFh3EBQJB2jWJ8F2M4l-D6qGoewXROE1M,7102
- wolfhece/acceptability/func.py,sha256=cEhCNITNtByR8Yu8JNddzN4NpUNczABykPALOULJLh8,61206
+ wolfhece/acceptability/func.py,sha256=z4CI2OYtVa1vfC_v5dEhgG3uA1IijppfwewJMBDPt6g,61192
  wolfhece/apps/ManageParams.py,sha256=heg5L4fMn0ettR7Bad_Q680o_JWnTbe3WFkL_9IziAk,312
  wolfhece/apps/Optimisation_hydro.py,sha256=mHazBazTUGyxPbHPXhaQim8vqIeOOuKPjH0B48VWduA,374
  wolfhece/apps/WolfPython.png,sha256=K3dcbeZUiJCFNwOAAlGMaRGLJ56yM8WD2I_0bk0xT1g,104622
@@ -73,7 +72,7 @@ wolfhece/apps/check_install.py,sha256=jrKR-njqnpIh6ZJqvP6KbDUPVCfwTNQj4glQhcyzs9
  wolfhece/apps/curvedigitizer.py,sha256=avWERHuVxPnJBOD_ibczwW_XG4vAenqWS8W1zjhBox8,4898
  wolfhece/apps/isocurrent.py,sha256=4XnNWPa8mYUK7V4zdDRFrHFIXNG2AN2og3TqWKKcqjY,3811
  wolfhece/apps/splashscreen.py,sha256=EjEjZGuWV-8ZfHhnFH4XLrrtB-YpzPDVhFzRrjgFUzI,2624
- wolfhece/apps/version.py,sha256=Qdfdhlr83QguXgzhqvMXWV1V9PBFkF57mUmEltEzFbM,388
+ wolfhece/apps/version.py,sha256=DOnhV0acM8wlohylXhs6wwbgGzeJWQwuSC3SgXqFsRg,388
  wolfhece/apps/wolf.py,sha256=gqfm-ZaUJqNsfCzmdtemSeqLw-GVdSVix-evg5WArJI,293
  wolfhece/apps/wolf2D.py,sha256=gWD9ee2-1pw_nUxjgRaJMuSe4kUT-RWhOeoTt_Lh1mM,267
  wolfhece/apps/wolf_logo.bmp,sha256=ruJ4MA51CpGO_AYUp_dB4SWKHelvhOvd7Q8NrVOjDJk,3126
@@ -173,8 +172,8 @@ wolfhece/lazviewer/viewer/viewer.exe,sha256=pF5nwE8vMWlEzkk-SOekae9zpOsPhTWhZbqa
  wolfhece/lazviewer/viewer/viewer.py,sha256=8_MQCaQOS0Z_oRPiGoRy1lq-aCirReX3hWEBjQID0ig,24665
  wolfhece/libs/MSVCP140.dll,sha256=2GrBWBI6JFuSdZLIDMAg_qKcjErdwURGbEYloAypx3o,565640
  wolfhece/libs/WolfDll.dll,sha256=E8SeV0AHVXW5ikAQuVtijqIvaYx7UIMeqvnnsmTMCT8,132934144
- wolfhece/libs/WolfOGL.c,sha256=wYazkWV1hDvO41YM4bDZOgYBBfoDYedc-DDgShBGiZA,1753612
- wolfhece/libs/WolfOGL.pyx,sha256=NR-PLXp6KvxsyU1xHaacIxve3nKC-KPGhZsTdXj9E1w,80267
+ wolfhece/libs/WolfOGL.c,sha256=Dm_4cI9NHFAMhs4cskjcQStpeIYjedOrHPqlcHeQVAk,1753700
+ wolfhece/libs/WolfOGL.pyx,sha256=YmA3NPLt9zPo-UarGnz33dFdSmAc2Vaul5I1_0EWYKo,80278
  wolfhece/libs/api-ms-win-crt-heap-l1-1-0.dll,sha256=r0euvgZa8vBFoZ8g7H5Upuc8DD6aUQimMJWnIyt1OBo,19720
  wolfhece/libs/api-ms-win-crt-math-l1-1-0.dll,sha256=ol0GVN6wzqGu8Ym6IXTQ8TvfUvCY06nsNtFeS_swxJk,27912
  wolfhece/libs/api-ms-win-crt-runtime-l1-1-0.dll,sha256=NxpEq5FhSowm0Vm-uHKntD9WnLX6yK2pms6Y8mSjtQM,23304
@@ -202,7 +201,7 @@ wolfhece/libs/vcomp100.dll,sha256=NKvXc8hc4MrFa9k8ErALA6OmldGfR3zidaZPCZhMVJI,57
  wolfhece/libs/vcruntime140.dll,sha256=YYMpkONk3KW_osYdkw8ArKrm0aqjEwOSQDRVrpoRJaU,89880
  wolfhece/libs/vcruntime140_1.dll,sha256=FVS1gClo_bJwWmfLthWF6VYLnkKdBDpap0LvPJu_tr8,37240
  wolfhece/libs/verify_license.cp310-win_amd64.pyd,sha256=-lobNXvqiN7vNl7zzrZWOBPsGW13qT77JWiSy2D3FCk,92672
- wolfhece/libs/wolfogl.cp310-win_amd64.pyd,sha256=Yj-fHOu02Ln24hKcIvCmMZAV7VDBpNxeSZXpDe44iWY,286720
+ wolfhece/libs/wolfogl.cp310-win_amd64.pyd,sha256=K6HTq5bFSRv9y-k6wbdO4qMPJcZQKvozCYThsltx6Yk,286720
  wolfhece/libs/wolfpy.cp310-win_amd64.pyd,sha256=6omqEaxmQll-Gg24e90wVomAB9rO_tyyOES2FewXn58,36457472
  wolfhece/libs/zlib1.dll,sha256=E9a0e62VgmG1A8ohZzhVCmmfGtbyXxXu4aFeADTNJ30,77824
  wolfhece/libs/GL/gl.h,sha256=IhsS_fOLa8GW9MpiLZebe9QYRy6uIB_qK_uQMWMOoeg,46345
@@ -281,8 +280,8 @@ wolfhece/ui/wolf_multiselection_collapsiblepane.py,sha256=yGbU_JsF56jsmms0gh7mxa
  wolfhece/ui/wolf_times_selection_comparison_models.py,sha256=wCxGRnE3kzEkWlWA6-3X8ADOFux_B0a5QWJ2GnXTgJw,4709
  wolfhece/wintab/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  wolfhece/wintab/wintab.py,sha256=8A-JNONV6ujgsgG3lM5Uw-pVgglPATwKs86oBzzljoc,7179
- wolfhece-2.1.29.dist-info/METADATA,sha256=CR6XfFiJ-Gm_rvZi-D7OXxMjiwMk0SiImv_sX1p_4kc,2356
- wolfhece-2.1.29.dist-info/WHEEL,sha256=-oYQCr74JF3a37z2nRlQays_SX2MqOANoqVjBBAP2yE,91
- wolfhece-2.1.29.dist-info/entry_points.txt,sha256=MAG6NrF64fcxiVNb2g1JPYPGcn9C0HWtqqNurB83oX0,330
- wolfhece-2.1.29.dist-info/top_level.txt,sha256=EfqZXMVCn7eILUzx9xsEu2oBbSo9liWPFWjIHik0iCI,9
- wolfhece-2.1.29.dist-info/RECORD,,
+ wolfhece-2.1.30.dist-info/METADATA,sha256=3IcGhCz-MaGj4jP2HATNkudS-Ae869L61Ec7nyjt5rE,2356
+ wolfhece-2.1.30.dist-info/WHEEL,sha256=rWxmBtp7hEUqVLOnTaDOPpR-cZpCDkzhhcBce-Zyd5k,91
+ wolfhece-2.1.30.dist-info/entry_points.txt,sha256=MAG6NrF64fcxiVNb2g1JPYPGcn9C0HWtqqNurB83oX0,330
+ wolfhece-2.1.30.dist-info/top_level.txt,sha256=EfqZXMVCn7eILUzx9xsEu2oBbSo9liWPFWjIHik0iCI,9
+ wolfhece-2.1.30.dist-info/RECORD,,
wolfhece-2.1.29.dist-info/WHEEL → wolfhece-2.1.30.dist-info/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (71.0.3)
+ Generator: setuptools (71.0.4)
  Root-Is-Purelib: true
  Tag: py3-none-any
 
wolfhece/acceptability/acceptability1.py DELETED
@@ -1,211 +0,0 @@
- # import pandas as pd
- # import Parallels
- # import os
- # import func
- # from osgeo import gdal
- # import fiona
- # import glob
- # import numpy as np
- # import geopandas as gpd
-
- # def Vulnerability2(main_dir, resolution):
- #     os.chdir(main_dir)
- #     print("STEP2: convert vectors to raster based on their vulnerability values")
- #     layer = fiona.listlayers(os.getcwd()+"//TEMP//DATABASES//SA_database_final_V.gpkg")
- #     database = os.getcwd()+"//TEMP//DATABASES//SA_database_final_V.gpkg"
- #     extent = os.getcwd()+"//INPUT//STUDY_AREA//Bassin_SA.shp"
- #     pixel=resolution
- #     attribute = "Vulne"
- #     parallel_v2r(layer, database, extent, attribute, pixel)
- #     attribute = "Code"
- #     parallel_v2r(layer, database, extent, attribute, pixel)
-
- # def base_data_creation(main_dir, Original_gdb, Study_Area, CaPa_Walloon, PICC_Walloon):
- #     #Change the directory
- #     os.chdir(main_dir)
- #     # Step 1, Clip GDB data
- #     file_path=os.getcwd()+"//INPUT//DATABASE//"+str(Original_gdb)
- #     Study_Area=os.getcwd()+"//INPUT//STUDY_AREA//"+str(Study_Area)
- #     data_type="OpenfileGDB"
- #     number_procs = 8
- #     output_gpkg = os.getcwd()+"//TEMP//DATABASES//SA_database.gpkg"
- #     paths = pd.read_csv(os.getcwd()+"//INPUT//CSVs//Vulnerability_matrix_new1.csv", sep=",", encoding='latin-1')
- #     paths["subfolder"]=None
- #     x, y = paths.shape
- #     for i in range(x):
- #         a=paths["Path"][i].split('/')
- #         paths["subfolder"][i]=a[1]
- #     layers = paths["subfolder"].to_list()
- #     Parallels.parallel_gpd_clip(layers, file_path, Study_Area, output_gpkg, data_type, number_procs)
- #     # Step 2, Clip Cadaster data
- #     file_path=os.getcwd()+"//INPUT//DATABASE//"+str(CaPa_Walloon)
- #     data_type='GPKG'
- #     number_procs = 8
- #     output_gpkg = os.getcwd()+"//TEMP//DATABASES//SA_CaPa.gpkg"
- #     layers = ["CaBu", "CaPa"]
- #     Parallels.parallel_gpd_clip(layers, file_path, Study_Area, output_gpkg, data_type, number_procs)
- #     # Step 3, Clip PICC data
- #     file_path=os.getcwd()+"//INPUT//DATABASE//"+str(PICC_Walloon)
- #     data_type='OpenfileGDB'
- #     number_procs = 8
- #     output_gpkg = os.getcwd()+"//TEMP//DATABASES//SA_PICC.gpkg"
- #     layers=['CONSTR_BATIEMPRISE']
- #     Parallels.parallel_gpd_clip(layers, file_path, Study_Area, output_gpkg, data_type, number_procs)
- #     #Step 4, create database based on changes in report
- #     input_database=os.getcwd()+"//TEMP//DATABASES//SA_database.gpkg"
- #     layers = fiona.listlayers(os.getcwd()+"//TEMP//DATABASES//SA_database.gpkg")
- #     walous = ["WALOUS_2018_LB72_112", "WALOUS_2018_LB72_31", "WALOUS_2018_LB72_32", "WALOUS_2018_LB72_331",
- #               "WALOUS_2018_LB72_332", "WALOUS_2018_LB72_333", "WALOUS_2018_LB72_34"]
- #     data_type="GPKG"
- #     PICC = gpd.read_file(os.getcwd()+"//TEMP//DATABASES//SA_PICC.gpkg", driver="GPKG", layer = 'CONSTR_BATIEMPRISE')
- #     CaPa = gpd.read_file(os.getcwd()+"//TEMP//DATABASES//SA_CaPa.gpkg", driver='GPKG', layer= 'CaPa')
- #     output_database = os.getcwd()+"//TEMP//DATABASES//SA_database_final.gpkg"
- #     for i in range(len(layers)):
- #         print(i)
- #         func.data_modification(input_database, data_type, layers[i], walous, output_database, PICC, CaPa)
- #     func.shp_to_raster(os.getcwd()+"//INPUT//DATABASE//CE_IGN_TOP10V/CE_IGN_TOP10V.shp", os.getcwd()+"//TEMP//DATABASES//CE_IGN_TOP10V.tiff")
- #     #Pre-processing for Vulnerability
- #     layers = fiona.listlayers(os.getcwd()+"//TEMP//DATABASES//SA_database_final.gpkg")
- #     paths = pd.read_csv(os.getcwd()+"//INPUT//CSVs//Vulnerability_matrix_new1.csv", sep=",", encoding='latin-1')
- #     paths[["name", "name1"]] = paths["Path"].str.split("/", expand=True)
- #     names = paths["name1"].to_list()
- #     list_shp = list(set(names).difference(layers))
- #     print("Excluded layers due to no features in shapefiles:")
- #     print(list_shp)
- #     paths1 =paths[~paths["name1"].isin(list_shp)]
- #     a,b = paths1.shape
- #     print("STEP1: Saving the database for Vulnerability with attributes Vulne and Code")
- #     for i in range(a):
- #         shp = gpd.read_file(os.getcwd()+"//TEMP//DATABASES//SA_database_final.gpkg",
- #                             driver='GPKG',
- #                             layer=paths1["name1"][i])
- #         x, y = shp.shape
- #         if x > 0:
- #             shp["Path"] = paths["name1"][i]
- #             shp["Vulne"] = paths["Vulne"][i]
- #             shp["Code"] = paths["Code"][i]
- #             shp = shp[["geometry", "Path", "Vulne","Code"]]
- #             shp.to_file(os.getcwd()+"//TEMP//DATABASES//SA_database_final_V.gpkg",
- #                         driver='GPKG',
- #                         layer=paths["name1"][i])
- #     print("STEP2: convert vectors to raster based on their vulnerability values")
- #     layer = fiona.listlayers(os.getcwd()+"//TEMP//DATABASES//SA_database_final_V.gpkg")
- #     database = os.getcwd()+"//TEMP//DATABASES//SA_database_final_V.gpkg"
- #     extent = os.getcwd()+"//INPUT//STUDY_AREA//Bassin_SA.shp"
- #     attribute = "Vulne"
- #     Parallels.parallel_v2r(layer, database, extent, attribute)
- #     attribute = "Code"
- #     Parallels.parallel_v2r(layer, database, extent, attribute)
- # #
- # def Vulnerability(main_dir,sc,AOI):
- #     print("Starting VULNERABILITY computations at 1 m resolution")
- #     os.chdir(main_dir)
- #     # layers = fiona.listlayers(os.getcwd()+"//TEMP//DATABASES//SA_database_final.gpkg")
- #     # # load the paths from csv with Vulne values
- #     # paths = pd.read_csv(os.getcwd()+"//INPUT//CSVs//Vulnerability_matrix_new1.csv", sep=",", encoding='latin-1')
- #     # paths[["name", "name1"]] = paths["Path"].str.split("/", expand=True)
- #     # #names = paths["name1"].to_list()
- #     # # loop for loading all shapefiles with the names matching with vulnerability matrix
- #     # names = paths["name1"].to_list()
- #     # list_shp = list(set(names).difference(layers))
- #     # print("Excluded layers due to no features in shapefiles:")
- #     # print(list_shp)
- #     # paths1 =paths[~paths["name1"].isin(list_shp)]
- #     # a,b = paths1.shape
- #     # print("STEP1: Saving the database for Vulnerability with attributes Vulne and Code")
- #     # for i in range(a):
- #     #     shp = gpd.read_file(os.getcwd()+"//TEMP//DATABASES//SA_database_final.gpkg",
- #     #                         driver='GPKG',
- #     #                         layer=paths1["name1"][i])
- #     #     x, y = shp.shape
- #     #     if x > 0:
- #     #         shp["Path"] = paths["name1"][i]
- #     #         shp["Vulne"] = paths["Vulne"][i]
- #     #         shp["Code"] = paths["Code"][i]
- #     #         shp = shp[["geometry", "Path", "Vulne","Code"]]
- #     #         shp.to_file(os.getcwd()+"//TEMP//DATABASES//SA_database_final_V.gpkg",
- #     #                     driver='GPKG',
- #     #                     layer=paths["name1"][i])
- #     # print("STEP2: convert vectors to raster based on their vulnerability values")
- #     # layer = fiona.listlayers(os.getcwd()+"//TEMP//DATABASES//SA_database_final_V.gpkg")
- #     # database = os.getcwd()+"//TEMP//DATABASES//SA_database_final_V.gpkg"
- #     # extent = os.getcwd()+"//INPUT//STUDY_AREA//Bassin_SA.shp"
- #     # attribute = "Vulne"
- #     # Parallels.parallel_v2r(layer, database, extent, attribute)
- #     # attribute = "Code"
- #     # Parallels.parallel_v2r(layer, database, extent, attribute)
- #     bu = glob.glob(os.getcwd()+"//INPUT//REMOVED_BUILDINGS//Scenario"+str(sc)+"//*.shp")
- #     if len(bu)>0:
- #         bu_PICC = os.getcwd()+"//INPUT//REMOVED_BUILDINGS//Scenario"+str(sc)+"//Removed_Buildings_PICC.shp"
- #         bu_CaBu = os.getcwd()+"//INPUT//REMOVED_BUILDINGS//Scenario"+str(sc)+"//Removed_Buildings_CaBu.shp"
- #         func.shp_to_raster(bu_PICC, os.getcwd()+"//TEMP//REMOVED_BUILDINGS//Scenario"+str(sc)+"//Removed_Buildings_PICC.tiff")
- #         func.shp_to_raster(bu_CaBu, os.getcwd()+"//TEMP//REMOVED_BUILDINGS//Scenario"+str(sc)+"//Removed_Buildings_CaBu.tiff")
- #     else:
- #         print("No buildings were removed in water depth analysis OR No shapefiles in INPUT/REMOVED_BUILDINGS/Scenario"+str(sc))
- #     print("STEP3: Generate Vulnerability rasters 1m")
- #     attribute="Vulne"
- #     Output_tiff = os.getcwd()+"//TEMP//VULNERABILITY//Scenario"+str(sc)+"//Vulnerability_SA.tiff"
- #     func.Comp_Vulnerability(Output_tiff, attribute,sc)
- #     attribute = "Code"
- #     Output_tiff = os.getcwd()+"//TEMP//VULNERABILITY//Scenario"+str(sc)+"//Vulnerability_Code_SA.tiff"
- #     print(Output_tiff)
- #     func.Comp_Vulnerability(Output_tiff, attribute,sc)
- #     print("STEP4: Save Vulnerability files for the area of interest")
- #     func.match_vuln_modrec(os.getcwd() + "//TEMP//DATABASES//CE_IGN_TOP10V/CE_IGN_TOP10V.tiff",
- #                            os.getcwd() + "//TEMP//Masked/River_extent.tiff", os.getcwd()+"//INPUT//WATER_DEPTH//Scenario"+str(sc)+"//T1000.tif")
- #     func.match_vuln_modrec(os.getcwd() + "//TEMP//VULNERABILITY//Scenario"+str(sc)+"//Vulnerability_SA.tiff",
- #                            os.getcwd() + "//OUTPUT//VULNERABILITY//Scenario"+str(sc)+"Vulnerability_"+str(AOI)+".tiff", os.getcwd()+"//INPUT//WATER_DEPTH//Scenario"+str(sc)+"//T1000.tif")
- #     func.match_vuln_modrec(os.getcwd() + "//TEMP//VULNERABILITY//Scenario"+str(sc)+"//Vulnerability_Code_SA.tiff",
- #                            os.getcwd() + "//OUTPUT//VULNERABILITY//Scenario"+str(sc)+"//Vulnerability_Code"+str(AOI)+".tiff", os.getcwd()+"//INPUT//WATER_DEPTH//Scenario"+str(sc)+"//T1000.tif")
-
- # def Vulnerability2(main_dir, attribute):
- #     os.chdir(main_dir)
- #     Output_tiff = os.getcwd()+"//OUTPUT//VULNERABILITY//Vulnerability_Code.tiff"
- #     func.Comp_Vulnerability(Output_tiff, attribute)
- #     #func.match_vuln_modrec(os.getcwd() + "//TEMP//DATABASES//CE_IGN_TOP10V/CE_IGN_TOP10V.tiff",
- #     #                       os.getcwd() + "//TEMP//Masked/River_extent.tiff")
- #     #func.match_vuln_modrec(os.getcwd() + "//OUTPUT//VULNERABILITY//Vulnerability.tiff",
- #     #                       os.getcwd() + "//TEMP//Masked/Vulnerability_extent.tiff")
- #     #func.match_vuln_modrec(os.getcwd() + "//OUTPUT//VULNERABILITY//Vulnerability_Code.tiff",
- #     #                       os.getcwd() + "//TEMP//Masked/Vulnerability_Code_extent.tiff")
-
- # def acceptability(main_dir,area_of_interest):
- #     os.chdir(main_dir)
- #     Vulne = gdal.Open(os.getcwd() + "//TEMP//Masked/Vulnerability_extent.tiff")
- #     Vulne = Vulne.GetRasterBand(1).ReadAsArray()
- #     riv = gdal.Open(os.getcwd() + "//TEMP//Masked/River_extent.tiff")
- #     riv = riv.GetRasterBand(1).ReadAsArray()
- #     list1 = ["2", "5", "15", "25", "50", "100", "1000"]
- #     # sample for saving the raster
- #     # mod1 = rasterio.open("G://00_GT_Resilience//Simulations_Theux//Scen_"+str(scen)+"//Theux_1.3K_sim_T1000_h.tif")
- #     Qfile = pd.read_csv(os.getcwd() + "//INPUT//CSVs//Book2.csv")
- #     # run vul-mod for 4 return intervals
- #     x = glob.glob(os.getcwd() + "//INPUT//WATER_DEPTH//*.tiff")
- #     Area_interest = area_of_interest
- #     for i in range(len(list1)):
- #         mod = gdal.Open(x[i])
- #         mod = mod.GetRasterBand(1).ReadAsArray()
- #         mod[mod == 0] = np.nan
- #         mod[riv == 1] = np.nan
- #         func.VulMod(Qfile, mod, Vulne, list1[i], Area_interest)
- #     ax=locals()
- #     list1=["2","5", "15","25", "50", "100", "1000"]
- #     qs= glob.glob(os.getcwd()+"//TEMP//Q_files//*.tiff")
- #     for i in range(len(list1)):
- #         ax["vm"+str(i)] = gdal.Open(qs[i])
- #         ax["vm"+str(i)] = ax["vm"+str(i)].GetRasterBand(1).ReadAsArray()
- #     #Remove nans from other Q files for final acceptability computation
- #     for i in range(len(list1)-1):
- #         ax["vm"+str(i)+str(1)] = np.nan_to_num(ax["vm"+str(i)], nan=0)
- #         ax["vm"+str(i)+str(1)][np.isnan(ax["vm"+str(len(list1))])] = np.nan
- #     pond = pd.read_csv(os.getcwd()+"//INPUT//CSVs//Ponderation.csv")
- #     comb = vm6*float(pond.iloc[6,1]) + vm51*float(pond.iloc[5,1]) + vm41*float(pond.iloc[4,1]) + vm31*float(pond.iloc[3,1]) + vm21*float(pond.iloc[2,1]) + vm11*float(pond.iloc[1,1]) +vm01*float(pond.iloc[0,1])
- #     dst_filename = os.getcwd()+"//OUTPUT//ACCEPTABILITY//Acceptability"+str(area_of_interest)+".tiff"
- #     y_pixels, x_pixels = comb.shape # number of pixels in x
- #     driver = gdal.GetDriverByName('GTiff')
- #     dataset = driver.Create(dst_filename, x_pixels, y_pixels, gdal.GDT_Float32, 1, options=["COMPRESS=LZW"])
- #     dataset.GetRasterBand(1).WriteArray(comb.astype(np.float32))
- #     input_raster = os.getcwd()+"//OUTPUT//ACCEPTABILITY//Acceptability"+str(area_of_interest)+".tiff"
- #     output_raster = os.getcwd()+"//OUTPUT//ACCEPTABILITY//Acceptability"+str(area_of_interest)+"_100m.tiff"
- #     Agg = gdal.Warp(output_raster, input_raster, xRes=100, yRes=100, resampleAlg='Average')
- #     Agg = None