fimeval 0.1.51__py3-none-any.whl → 0.1.53__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fimeval/BuildingFootprint/evaluationwithBF.py CHANGED
@@ -1,5 +1,4 @@
  import os
- import ee
  import glob
  import geopandas as gpd
  import rasterio
@@ -21,6 +20,7 @@ def Changeintogpkg(input_path, output_dir, layer_name):
      gdf.to_file(output_gpkg, driver="GPKG")
      return output_gpkg

+
  def GetFloodedBuildingCountInfo(
      building_fp_path,
      study_area_path,
@@ -85,7 +85,7 @@ def GetFloodedBuildingCountInfo(
      if "bm" in str(raster1_path).lower():
          count_centroids_in_raster(raster1_path, "Benchmark")
          count_centroids_in_raster(raster2_path, "Candidate")
-
+
      elif "candidate" in str(raster2_path).lower():
          count_centroids_in_raster(raster1_path, "Candidate")
          count_centroids_in_raster(raster2_path, "Benchmark")
@@ -106,9 +106,10 @@ def GetFloodedBuildingCountInfo(
      CSI = TP / (TP + FP + FN) if (TP + FP + FN) > 0 else 0
      FAR = FP / (TP + FP) if (TP + FP) > 0 else 0
      POD = TP / (TP + FN) if (TP + FN) > 0 else 0
-
-
-     BDR = (centroid_counts["Candidate"]- centroid_counts["Benchmark"])/centroid_counts["Benchmark"]
+
+     BDR = (
+         centroid_counts["Candidate"] - centroid_counts["Benchmark"]
+     ) / centroid_counts["Benchmark"]

      counts_data = {
          "Category": [
@@ -223,6 +224,7 @@ def GetFloodedBuildingCountInfo(
      print(f"Performance metrics chart is saved as PNG at {output_path}")
      fig.show()

+
  def process_TIFF(
      tif_files, contingency_files, building_footprint, boundary, method_path
  ):
@@ -266,22 +268,46 @@ def process_TIFF(
          print("Warning: No benchmark file found.")
      elif not candidate_paths:
          print("Warning: No candidate files found.")
-
+
+
  def find_existing_footprint(out_dir):
      gpkg_files = list(Path(out_dir).glob("*.gpkg"))
      return gpkg_files[0] if gpkg_files else None

- #Incase user defined individual shapefile for each case study
+
+ # Incase user defined individual shapefile for each case study
  def detect_shapefile(folder):
-     shapefile = None
-     for ext in (".shp", ".gpkg", ".geojson", ".kml"):
-         for file in os.listdir(folder):
-             if file.lower().endswith(ext):
-                 shapefile = os.path.join(folder, file)
-                 print(f"Auto-detected shapefile: {shapefile}")
-                 return shapefile
-     return None
-
+     shapefile = None
+     for ext in (".shp", ".gpkg", ".geojson", ".kml"):
+         for file in os.listdir(folder):
+             if file.lower().endswith(ext):
+                 shapefile = os.path.join(folder, file)
+                 print(f"Auto-detected shapefile: {shapefile}")
+                 return shapefile
+     return None
+
+
+ def ensure_pyspark(version: str | None = "3.5.4") -> None:
+     """Install pyspark at runtime via `uv pip` into this env (no-op if present)."""
+     import importlib, shutil, subprocess, sys, re
+     try:
+         import importlib.util
+         if importlib.util.find_spec("pyspark"):
+             return
+     except Exception:
+         pass
+     uv = shutil.which("uv")
+     if not uv:
+         raise RuntimeError("`uv` not found on PATH. Please install uv or add it to PATH.")
+     if version is None:
+         spec = "pyspark"
+     else:
+         v = version.strip()
+         spec = f"pyspark{v}" if re.match(r"^[<>=!~]", v) else f"pyspark=={v}"
+     subprocess.check_call([uv, "pip", "install", "--python", sys.executable, spec])
+
+
+
  def EvaluationWithBuildingFootprint(
      main_dir,
      method_name,
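The new `ensure_pyspark` helper treats its argument either as a bare version or as a full specifier. A short usage sketch (it assumes `uv` is on PATH, exactly as the function itself requires):

```python
# How the version argument is interpreted (illustrative calls):
ensure_pyspark()         # installs pyspark==3.5.4 if pyspark is not importable
ensure_pyspark(">=3.5")  # leading <,>,=,!,~ means "use the string as a specifier"
ensure_pyspark(None)     # unpinned: installs the latest pyspark
```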
@@ -289,6 +315,7 @@ def EvaluationWithBuildingFootprint(
      country=None,
      building_footprint=None,
      shapefile_dir=None,
+     geeprojectID=None,
  ):
      tif_files_main = glob.glob(os.path.join(main_dir, "*.tif"))
      if tif_files_main:
@@ -303,9 +330,7 @@ def EvaluationWithBuildingFootprint(

      if shapefile_dir:
          boundary = shapefile_dir
-     elif os.path.exists(
-         os.path.join(method_path, "BoundaryforEvaluation")
-     ):
+     elif os.path.exists(os.path.join(method_path, "BoundaryforEvaluation")):
          boundary = os.path.join(
              method_path, "BoundaryforEvaluation", "FIMEvaluatedExtent.shp"
          )
@@ -313,9 +338,11 @@ def EvaluationWithBuildingFootprint(
          boundary = detect_shapefile(main_dir)

      building_footprintMS = building_footprint
+
      if building_footprintMS is None:
-         import msfootprint as msf
-
+         ensure_pyspark()
+         from .microsoftBF import BuildingFootprintwithISO
+
          out_dir = os.path.join(method_path, "BuildingFootprint")
          if not os.path.exists(out_dir):
              os.makedirs(out_dir)
@@ -323,7 +350,15 @@ def EvaluationWithBuildingFootprint(
          if not EX_building_footprint:
              boundary_dir = shapefile_dir if shapefile_dir else boundary

-             msf.BuildingFootprintwithISO(country, boundary_dir, out_dir)
+             if geeprojectID:
+                 BuildingFootprintwithISO(
+                     country,
+                     boundary_dir,
+                     out_dir,
+                     geeprojectID=geeprojectID,
+                 )
+             else:
+                 BuildingFootprintwithISO(country, boundary_dir, out_dir)
          building_footprintMS = os.path.join(
              out_dir, f"building_footprint.gpkg"
          )
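Side note: since the new `BuildingFootprintwithISO` (added in microsoftBF.py, shown later in this diff) defaults `geeprojectID=None`, the two branches above are behaviorally equivalent to a single call — a sketch:

```python
# Equivalent to the if/else above, relying on the geeprojectID=None default:
BuildingFootprintwithISO(country, boundary_dir, out_dir, geeprojectID=geeprojectID)
```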
@@ -355,15 +390,19 @@ def EvaluationWithBuildingFootprint(
              os.path.join(method_path, "BoundaryforEvaluation")
          ):
              boundary = os.path.join(
-                 method_path, "BoundaryforEvaluation", "FIMEvaluatedExtent.shp"
+                 method_path,
+                 "BoundaryforEvaluation",
+                 "FIMEvaluatedExtent.shp",
              )
          else:
              boundary = detect_shapefile(os.path.join(main_dir, folder))

          building_footprintMS = building_footprint
+
          if building_footprintMS is None:
-             import msfootprint as msf
-
+             ensure_pyspark()
+             from .microsoftBF import BuildingFootprintwithISO
+
              out_dir = os.path.join(method_path, "BuildingFootprint")
              if not os.path.exists(out_dir):
                  os.makedirs(out_dir)
@@ -372,9 +411,17 @@ def EvaluationWithBuildingFootprint(
              boundary_dir = (
                  shapefile_dir if shapefile_dir else boundary
              )
-             msf.BuildingFootprintwithISO(
-                 country, boundary_dir, out_dir
-             )
+             if geeprojectID:
+                 BuildingFootprintwithISO(
+                     country,
+                     boundary_dir,
+                     out_dir,
+                     geeprojectID=geeprojectID,
+                 )
+             else:
+                 BuildingFootprintwithISO(
+                     country, boundary_dir, out_dir
+                 )
          building_footprintMS = os.path.join(
              out_dir, f"building_footprint.gpkg"
          )
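Putting the new parameter together, a hypothetical top-level call. All paths and the project ID are placeholders, and the `output_dir` argument name follows the README table later in this diff (the middle of the signature is not shown in these hunks):

```python
# Hypothetical invocation; directories and the GEE cloud project ID are
# illustrative, not taken from the package.
EvaluationWithBuildingFootprint(
    main_dir="FIM_cases/",
    method_name="AOI",
    output_dir="FIM_results/",
    country="USA",                      # ISO-3 code used to pick the GEE collection
    geeprojectID="my-gee-cloud-project",
)
```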
fimeval/BuildingFootprint/microsoftBF.py ADDED
@@ -0,0 +1,131 @@
+ # Importing necessary libraries
+ import geemap
+ import ee
+ import os
+ from shapely.geometry import box
+ import pandas as pd
+ from pathlib import Path
+ import geopandas as gpd
+ from pyspark.sql import SparkSession
+ from shapely.wkt import loads
+ import shutil
+
+ # Suppress the warnings
+ import warnings
+
+ warnings.filterwarnings("ignore")
+
+ # Authenticate and initialize Earth Engine
+ ee.Authenticate()
+
+ # %%
+ def split_into_tiles(boundary, tile_size=0.1):
+     bounds = boundary.total_bounds
+     x_min, y_min, x_max, y_max = bounds
+     tiles = []
+     x = x_min
+     while x < x_max:
+         y = y_min
+         while y < y_max:
+             tile = box(x, y, x + tile_size, y + tile_size)
+             if tile.intersects(boundary.unary_union):
+                 tiles.append(tile)
+             y += tile_size
+         x += tile_size
+     return gpd.GeoDataFrame(geometry=tiles, crs=boundary.crs)
+
+
+ # Merge the final geojson files
+ def mergeGeoJSONfiles(output_dir, merged_file):
+     output_dir = Path(output_dir)
+     files = list(output_dir.glob("*.geojson"))
+     gdfs = [gpd.read_file(file) for file in files]
+     merged_gdf = gpd.GeoDataFrame(pd.concat(gdfs, ignore_index=True), crs="EPSG:4326")
+     merged_gdf.to_file(merged_file, driver="GPKG")
+
+
+ # Process each batch with number of tiles
+ def process_batch(partition, collection_name, output_dir, boundary_wkt, projectID=None):
+     try:
+         if projectID:
+             ee.Initialize(project=projectID)
+         else:
+             ee.Initialize()
+
+     except Exception:
+         print("To initialize, please provide the earth engine project ID")
+
+     # Convert WKT boundary to geometry
+     boundary = loads(boundary_wkt)
+     results = []
+
+     for tile_wkt in partition:
+         try:
+             tile = loads(tile_wkt)
+             aoi = ee.Geometry(tile.__geo_interface__)
+             collection = ee.FeatureCollection(collection_name).filterBounds(aoi)
+
+             # Download features and filter by boundary
+             gdf = geemap.ee_to_gdf(collection)
+             gdf = gdf[gdf.geometry.intersects(boundary)]
+
+             # Save each tile as a GeoJSON file
+             tile_id = f"tile_{hash(tile)}"
+             output_file = Path(output_dir) / f"{tile_id}.geojson"
+             gdf.to_file(output_file, driver="GeoJSON")
+             results.append(f"Saved: {output_file}")
+         except Exception as e:
+             results.append(f"Error processing tile: {e}")
+
+     return results
+
+
+ def getBuildingFootprintSpark(
+     countryISO, boundary_file, out_dir, tile_size, projectID=None
+ ):
+     spark = SparkSession.builder.appName("BuildingFootprints").getOrCreate()
+
+     # Make temporary directory
+     temp_dir = out_dir / "temp"
+     temp_dir.mkdir(parents=True, exist_ok=True)
+
+     # Load and process boundary
+     boundary = gpd.read_file(boundary_file).to_crs("EPSG:4326")
+     tiles = split_into_tiles(boundary, tile_size)
+     boundary_wkt = boundary.unary_union.wkt
+
+     collection_names = [f"projects/sat-io/open-datasets/VIDA_COMBINED/{countryISO}"]
+
+     # Distribute processing
+     for collection_name in collection_names:
+         tiles_rdd = spark.sparkContext.parallelize(
+             tiles.geometry.apply(lambda x: x.wkt).tolist(), numSlices=10
+         )
+         results = tiles_rdd.mapPartitions(
+             lambda partition: process_batch(
+                 partition, collection_name, str(temp_dir), boundary_wkt, projectID
+             )
+         ).collect()
+
+     # Merge GeoJSON files
+     mergeGeoJSONfiles(temp_dir, out_dir / "building_footprint.gpkg")
+
+     # Clean up the temp directory
+     shutil.rmtree(temp_dir, ignore_errors=True)
+
+     print(f"Building footprint data saved to {out_dir / 'building_footprint.gpkg'}")
+
+
+ # %%
+ # Export the building footprint
+ def BuildingFootprintwithISO(countryISO, ROI, out_dir, geeprojectID=None):
+     out_dir = Path(out_dir)
+     out_dir.mkdir(parents=True, exist_ok=True)
+     filename = out_dir / "building_footprint.gpkg"
+
+     if filename.exists():
+         os.remove(filename)
+
+     getBuildingFootprintSpark(
+         countryISO, ROI, out_dir, tile_size=0.05, projectID=geeprojectID
+     )
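To make the tiling step concrete, a small sketch using the `split_into_tiles` defined above. The bounding box is illustrative, and exact tile counts at the edges can vary with floating-point stepping:

```python
# Illustrative 0.3° x 0.2° boundary split into 0.1° tiles (roughly a 3 x 2 grid).
import geopandas as gpd
from shapely.geometry import box

boundary = gpd.GeoDataFrame(geometry=[box(-87.7, 33.1, -87.4, 33.3)], crs="EPSG:4326")
tiles = split_into_tiles(boundary, tile_size=0.1)
print(len(tiles), tiles.total_bounds)  # tile count and the tiled extent
```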
fimeval/ContingencyMap/PWBs3.py CHANGED
@@ -1,4 +1,4 @@
- #import Libraries
+ # import Libraries
  import geopandas as gpd
  import boto3
  import botocore
@@ -7,27 +7,27 @@ import tempfile

  # Initialize an anonymous S3 client
  s3 = boto3.client(
-     's3',
-     config=botocore.config.Config(signature_version=botocore.UNSIGNED)
+     "s3", config=botocore.config.Config(signature_version=botocore.UNSIGNED)
  )

- bucket_name = 'sdmlab'
- pwb_folder = "PWB/"
+ bucket_name = "sdmlab"
+ pwb_folder = "PWB/"
+

  def PWB_inS3(s3_client, bucket, prefix):
      """Download all components of a shapefile from S3 into a temporary directory."""
      tmp_dir = tempfile.mkdtemp()
      response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix)
-     if 'Contents' not in response:
+     if "Contents" not in response:
          raise ValueError("No files found in the specified S3 folder.")
-
-     for obj in response['Contents']:
-         file_key = obj['Key']
-         file_name = os.path.basename(file_key)
-         if file_name.endswith(('.shp', '.shx', '.dbf', '.prj', '.cpg')):
+
+     for obj in response["Contents"]:
+         file_key = obj["Key"]
+         file_name = os.path.basename(file_key)
+         if file_name.endswith((".shp", ".shx", ".dbf", ".prj", ".cpg")):
              local_path = os.path.join(tmp_dir, file_name)
              s3_client.download_file(bucket, file_key, local_path)
-
+
      shp_files = [f for f in os.listdir(tmp_dir) if f.endswith(".shp")]
      if not shp_files:
          raise ValueError("No .shp file found after download.")
@@ -35,7 +35,8 @@ def PWB_inS3(s3_client, bucket, prefix):
      shp_path = os.path.join(tmp_dir, shp_files[0])
      return shp_path

+
  def get_PWB():
      shp_path = PWB_inS3(s3, bucket_name, pwb_folder)
      pwb = gpd.read_file(shp_path)
-     return pwb
+     return pwb
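The anonymous-client pattern this module uses is standard boto3. A self-contained sketch of the same listing step, with the public bucket and prefix taken from the code above (assumes the bucket allows anonymous reads):

```python
# List the permanent-water-body shapefile parts without AWS credentials.
import boto3
import botocore.config
from botocore import UNSIGNED

s3 = boto3.client("s3", config=botocore.config.Config(signature_version=UNSIGNED))
for obj in s3.list_objects_v2(Bucket="sdmlab", Prefix="PWB/").get("Contents", []):
    print(obj["Key"], obj["Size"])
```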
fimeval/ContingencyMap/evaluationFIM.py CHANGED
@@ -21,12 +21,14 @@ from .metrics import evaluationmetrics
  from .PWBs3 import get_PWB
  from ..utilis import MakeFIMsUniform

- #giving the permission to the folder
+
+ # giving the permission to the folder
  def is_writable(path):
      """Check if the directory and its contents are writable."""
      path = Path(path)
      return os.access(path, os.W_OK)

+
  def fix_permissions(path):
      path = Path(path).resolve()
      script_path = Path(__file__).parent / "fix_permissions.sh"
@@ -35,11 +37,15 @@ def fix_permissions(path):
          raise FileNotFoundError(f"Shell script not found: {script_path}")

      if is_writable(path):
-         return
+         return

      try:
-         result = subprocess.run(["bash", str(script_path), str(path)],
-             check=True, capture_output=True, text=True)
+         result = subprocess.run(
+             ["bash", str(script_path), str(path)],
+             check=True,
+             capture_output=True,
+             text=True,
+         )
          print(result.stdout)
      except subprocess.CalledProcessError as e:
          print(f"Shell script failed:\n{e.stderr}")
@@ -79,7 +85,7 @@ def evaluateFIM(
      # Get the smallest matched raster extent and make a boundary shapefile
      smallest_raster_path = get_smallest_raster_path(benchmark_path, *candidate_paths)

-     #If method is AOI, and direct shapefile directory is not provided, then it will search for the shapefile in the folder
+     # If method is AOI, and direct shapefile directory is not provided, then it will search for the shapefile in the folder
      if method.__name__ == "AOI":
          # If shapefile is not provided, search in the folder
          if shapefile is None:
@@ -95,7 +101,6 @@ def evaluateFIM(
              raise FileNotFoundError(
                  "No shapefile (.shp, .gpkg, .geojson, .kml) found in the folder and none provided. Either provide a shapefile directory or put shapefile inside folder directory."
              )
-
      # Run AOI with the found or provided shapefile
      bounding_geom = AOI(benchmark_path, shapefile, save_dir)
@@ -359,8 +364,10 @@ def evaluateFIM(
      print(f"Evaluation metrics saved to {csv_file}")
      return results

- #Safely deleting the folder
+
+ # Safely deleting the folder
  def safe_delete_folder(folder_path):
+     fix_permissions(folder_path)
      try:
          shutil.rmtree(folder_path)
      except PermissionError:
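The substantive change in this hunk is the `fix_permissions` call before deletion. The pattern in isolation, with an illustrative path:

```python
# Relax permissions first so shutil.rmtree is less likely to raise
# PermissionError on read-only intermediates (path is a placeholder).
import shutil

fix_permissions("FIM_cases/site01/processing")
shutil.rmtree("FIM_cases/site01/processing", ignore_errors=True)
```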
@@ -370,19 +377,28 @@ def safe_delete_folder(folder_path):
      except Exception as e:
          print(f"Error deleting {folder_path}: {e}")

- def EvaluateFIM(main_dir, method_name, output_dir, PWB_dir=None, shapefile_dir=None, target_crs=None, target_resolution=None):
+
+ def EvaluateFIM(
+     main_dir,
+     method_name,
+     output_dir,
+     PWB_dir=None,
+     shapefile_dir=None,
+     target_crs=None,
+     target_resolution=None,
+ ):
      main_dir = Path(main_dir)
      # Read the permanent water bodies
      if PWB_dir is None:
          gdf = get_PWB()
      else:
          gdf = gpd.read_file(PWB_dir)
-
-     #Grant the permission to the main directory
+
+     # Grant the permission to the main directory
      print(f"Fixing permissions for {main_dir}...")
      fix_permissions(main_dir)

-     #runt the process
+     # runt the process
      def process_TIFF(tif_files, folder_dir):
          benchmark_path = None
          candidate_path = []
@@ -422,7 +438,9 @@ def EvaluateFIM(main_dir, method_name, output_dir, PWB_dir=None, shapefile_dir=N
      # Check if main_dir directly contains tif files
      TIFFfiles_main_dir = list(main_dir.glob("*.tif"))
      if TIFFfiles_main_dir:
-         MakeFIMsUniform(main_dir, target_crs=target_crs, target_resolution=target_resolution)
+         MakeFIMsUniform(
+             main_dir, target_crs=target_crs, target_resolution=target_resolution
+         )

      # processing folder
      processing_folder = main_dir / "processing"
@@ -434,15 +452,20 @@ def EvaluateFIM(main_dir, method_name, output_dir, PWB_dir=None, shapefile_dir=N
      for folder in main_dir.iterdir():
          if folder.is_dir():
              tif_files = list(folder.glob("*.tif"))
-
+
              if tif_files:
-                 MakeFIMsUniform(folder, target_crs=target_crs, target_resolution=target_resolution)
-
+                 MakeFIMsUniform(
+                     folder,
+                     target_crs=target_crs,
+                     target_resolution=target_resolution,
+                 )
+
                  processing_folder = folder / "processing"
                  TIFFfiles = list(processing_folder.glob("*.tif"))

                  process_TIFF(TIFFfiles, folder)
                  safe_delete_folder(processing_folder)
              else:
-                 print(f"Skipping {folder.name} as it doesn't contain any tif files.")
-
+                 print(
+                     f"Skipping {folder.name} as it doesn't contain any tif files."
+                 )
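With the signature now spelled out one parameter per line, a hypothetical call; every path and value is illustrative:

```python
# Hypothetical run; directories are placeholders.
EvaluateFIM(
    main_dir="FIM_cases/",
    method_name="AOI",
    output_dir="FIM_results/",
    target_crs="EPSG:5070",    # optional: force a projected CRS
    target_resolution=10,      # optional: resample everything to 10 m
)
```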
fimeval/ContingencyMap/metrics.py CHANGED
@@ -7,21 +7,21 @@ def evaluationmetrics(out_image1, out_image2):
      unique_values, counts = np.unique(merged, return_counts=True)
      class_pixel_counts = dict(zip(unique_values, counts))
      class_pixel_counts
-     TN = class_pixel_counts.get(1,0)
-     FP = class_pixel_counts.get(2,0)
-     FN = class_pixel_counts.get(3,0)
-     TP = class_pixel_counts.get(4,0)
+     TN = class_pixel_counts.get(1, 0)
+     FP = class_pixel_counts.get(2, 0)
+     FN = class_pixel_counts.get(3, 0)
+     TP = class_pixel_counts.get(4, 0)
      epsilon = 1e-8
-     TPR = TP / (TP + FN+epsilon)
-     FNR = FN / (TP + FN+epsilon)
-     Acc = (TP + TN) / (TP + TN + FP + FN+epsilon)
-     Prec = TP / (TP + FP+epsilon)
-     sen = TP / (TP + FN+epsilon)
-     F1_score = 2 * (Prec * sen) / (Prec + sen+epsilon)
-     CSI = TP / (TP + FN + FP+epsilon)
-     POD = TP / (TP + FN+epsilon)
-     FPR = FP / (FP + TN+epsilon)
-     FAR = FP / (TP + FP+epsilon)
+     TPR = TP / (TP + FN + epsilon)
+     FNR = FN / (TP + FN + epsilon)
+     Acc = (TP + TN) / (TP + TN + FP + FN + epsilon)
+     Prec = TP / (TP + FP + epsilon)
+     sen = TP / (TP + FN + epsilon)
+     F1_score = 2 * (Prec * sen) / (Prec + sen + epsilon)
+     CSI = TP / (TP + FN + FP + epsilon)
+     POD = TP / (TP + FN + epsilon)
+     FPR = FP / (FP + TN + epsilon)
+     FAR = FP / (TP + FP + epsilon)

      return (
          unique_values,
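As a quick sanity check on these formulas, a toy run with the 1–4 contingency codes (the 1=TN, 2=FP, 3=FN, 4=TP mapping matches the legend in the printcontingency hunk below):

```python
import numpy as np

merged = np.array([1, 1, 2, 3, 4, 4, 4, 4])  # toy contingency-coded pixels
counts = dict(zip(*np.unique(merged, return_counts=True)))
TN, FP, FN, TP = (counts.get(k, 0) for k in (1, 2, 3, 4))  # 2, 1, 1, 4
eps = 1e-8
CSI = TP / (TP + FN + FP + eps)  # 4/6 ≈ 0.667
POD = TP / (TP + FN + eps)       # 4/5 = 0.8
FAR = FP / (TP + FP + eps)       # 1/5 = 0.2
```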
fimeval/ContingencyMap/printcontingency.py CHANGED
@@ -65,7 +65,7 @@ def getContingencyMap(raster_path, method_path):
          2: "False positive",
          3: "False negative",
          4: "True positive",
-         5: "Permanent water bodies"
+         5: "Permanent water bodies",
      }
      legend_patches = [
          Patch(
fimeval/__init__.py CHANGED
@@ -1,11 +1,20 @@
- #Evaluation modules
+ # Evaluation modules
  from .ContingencyMap.evaluationFIM import EvaluateFIM
  from .ContingencyMap.printcontingency import PrintContingencyMap
  from .ContingencyMap.plotevaluationmetrics import PlotEvaluationMetrics
  from .ContingencyMap.PWBs3 import get_PWB

- #Utility modules
+ # Utility modules
  from .utilis import compress_tif_lzw

  # Evaluation with Building foorprint module
  from .BuildingFootprint.evaluationwithBF import EvaluationWithBuildingFootprint
+
+ __all__ = [
+     "EvaluateFIM",
+     "PrintContingencyMap",
+     "PlotEvaluationMetrics",
+     "get_PWB",
+     "EvaluationWithBuildingFootprint",
+     "compress_tif_lzw",
+ ]
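The new `__all__` pins the package's public star-import surface. A quick check, assuming fimeval 0.1.53 is installed:

```python
import fimeval

print(fimeval.__all__)
# ['EvaluateFIM', 'PrintContingencyMap', 'PlotEvaluationMetrics',
#  'get_PWB', 'EvaluationWithBuildingFootprint', 'compress_tif_lzw']
```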
fimeval/utilis.py CHANGED
@@ -1,3 +1,4 @@
+ import os
  import shutil
  import pyproj
  import rasterio
@@ -5,24 +6,27 @@ from pathlib import Path
  import geopandas as gpd
  from rasterio.warp import calculate_default_transform, reproject, Resampling

- #Lossless compression to reduce the file size
+
+ # Lossless compression to reduce the file size
  def compress_tif_lzw(tif_path):
      # Read original file
      with rasterio.open(tif_path) as src:
          profile = src.profile.copy()
          data = src.read()
-     profile.update(compress='lzw')
+     profile.update(compress="lzw")

-     with rasterio.open(tif_path, 'w', **profile) as dst:
+     with rasterio.open(tif_path, "w", **profile) as dst:
          dst.write(data)

- #Check whether it is a projected CRS
+
+ # Check whether it is a projected CRS
  def is_projected_crs(crs):
      return crs and crs.is_projected

- #Check if the FIM bounds are within the CONUS
+
+ # Check if the FIM bounds are within the CONUS
  def is_within_conus(bounds, crs=None):
-     CONUS_BBOX = (-125, 24, -66.5, 49.5)
+     CONUS_BBOX = (-125, 24, -66.5, 49.5)
      left, bottom, right, top = bounds

      if crs and crs.is_projected:
@@ -37,7 +41,8 @@ def is_within_conus(bounds, crs=None):
          and top <= CONUS_BBOX[3]
      )

- #Reproject the FIMs to EPSG:5070 if withinUS and user doesnot define any target CRS, else user need to define it
+
+ # Reproject the FIMs to EPSG:5070 if withinUS and user doesnot define any target CRS, else user need to define it
  def reprojectFIMs(src_path, dst_path, target_crs):
      with rasterio.open(src_path) as src:
          if src.crs != target_crs:
@@ -45,14 +50,16 @@ def reprojectFIMs(src_path, dst_path, target_crs):
              src.crs, target_crs, src.width, src.height, *src.bounds
          )
          kwargs = src.meta.copy()
-         kwargs.update({
-             'crs': target_crs,
-             'transform': transform,
-             'width': width,
-             'height': height
-         })
-
-         with rasterio.open(dst_path, 'w', **kwargs) as dst:
+         kwargs.update(
+             {
+                 "crs": target_crs,
+                 "transform": transform,
+                 "width": width,
+                 "height": height,
+             }
+         )
+
+         with rasterio.open(dst_path, "w", **kwargs) as dst:
              for i in range(1, src.count + 1):
                  reproject(
                      source=rasterio.band(src, i),
@@ -61,29 +68,32 @@ def reprojectFIMs(src_path, dst_path, target_crs):
                      src_crs=src.crs,
                      dst_transform=transform,
                      dst_crs=target_crs,
-                     resampling=Resampling.nearest
+                     resampling=Resampling.nearest,
                  )
      else:
          print(f"Source raster is already in {target_crs}. No reprojection needed.")
          shutil.copy(src_path, dst_path)
      compress_tif_lzw(dst_path)

- #Resample into the coarser resoution amoung all FIMS within the case
+
+ # Resample into the coarser resoution amoung all FIMS within the case
  def resample_to_resolution(src_path, x_resolution, y_resolution):
+     src_path = Path(src_path)
+     print(src_path)
+     temp_path = src_path.with_name(src_path.stem + "_resampled.tif")
+     print(temp_path)
+
      with rasterio.open(src_path) as src:
-         transform = rasterio.transform.from_origin(src.bounds.left, src.bounds.top, x_resolution, y_resolution)
+         transform = rasterio.transform.from_origin(
+             src.bounds.left, src.bounds.top, x_resolution, y_resolution
+         )
          width = int((src.bounds.right - src.bounds.left) / x_resolution)
          height = int((src.bounds.top - src.bounds.bottom) / y_resolution)
-
          kwargs = src.meta.copy()
-         kwargs.update({
-             'transform': transform,
-             'width': width,
-             'height': height
-         })
-
-         dst_path = src_path
-         with rasterio.open(dst_path, 'w', **kwargs) as dst:
+         kwargs.update({"transform": transform, "width": width, "height": height})
+
+         # Write to temporary file
+         with rasterio.open(temp_path, "w", **kwargs) as dst:
              for i in range(1, src.count + 1):
                  reproject(
                      source=rasterio.band(src, i),
@@ -92,90 +102,87 @@ def resample_to_resolution(src_path, x_resolution, y_resolution):
                      src_crs=src.crs,
                      dst_transform=transform,
                      dst_crs=src.crs,
-                     resampling=Resampling.nearest
+                     resampling=Resampling.nearest,
                  )
-     # compress_tif_lzw(dst_path)

- #Check if the FIMs are in the same CRS or not else do further operation
+     os.remove(src_path)  # delete original
+     os.rename(temp_path, src_path)
+
+
+ # Check if the FIMs are in the same CRS or not else do further operation
  def MakeFIMsUniform(fim_dir, target_crs=None, target_resolution=None):
      fim_dir = Path(fim_dir)
-     tif_files = list(fim_dir.glob('*.tif'))
+     tif_files = list(fim_dir.glob("*.tif"))
      if not tif_files:
          print(f"No TIFF files found in {fim_dir}")
          return
-     processing_folder = fim_dir / 'processing'
-     processing_folder.mkdir(exist_ok=True)

-     crs_list = []
-     projected_status = []
-     bounds_list = []
-     fims_data = []
+     # Create processing folder to save standardized files
+     processing_folder = fim_dir / "processing"
+     processing_folder.mkdir(exist_ok=True)

+     # Collect info about each TIFF
+     crs_list, resolutions, bounds_list, projected_flags = [], [], [], []
      for tif_path in tif_files:
          try:
              with rasterio.open(tif_path) as src:
                  crs_list.append(src.crs)
-                 projected_status.append(is_projected_crs(src.crs))
+                 resolutions.append(src.res)
                  bounds_list.append(src.bounds)
-                 fims_data.append((src.bounds, src.crs))
-         except rasterio.RasterioIOError as e:
+                 projected_flags.append(is_projected_crs(src.crs))
+         except Exception as e:
              print(f"Error opening {tif_path}: {e}")
              return

-     all_projected = all(projected_status)
-     first_crs = crs_list[0] if crs_list else None
-     all_same_crs = all(crs == first_crs for crs in crs_list)
+     # CRS Check & Reproject if needed
+     all_projected = all(projected_flags)
+     all_same_crs = len(set(crs_list)) == 1

      if not all_projected or (all_projected and not all_same_crs):
-         if target_crs:
-             print(f"Reprojecting all FIMs to {target_crs}.")
-             for src_path in tif_files:
-                 dst_path = processing_folder / src_path.name
-                 reprojectFIMs(str(src_path), str(dst_path), target_crs)
-                 compress_tif_lzw(dst_path)
-         else:
-             all_within_conus = all(is_within_conus(bounds_list[i], crs_list[i]) for i in range(len(bounds_list)))
-
-             if all_within_conus:
-                 default_target_crs = "EPSG:5070"
-                 print(f"FIMs are within CONUS, reprojecting all to {default_target_crs} and saving to {processing_folder}")
-                 for src_path in tif_files:
-                     dst_path = processing_folder / src_path.name
-                     reprojectFIMs(str(src_path), str(dst_path), default_target_crs)
+         # Decide CRS to use
+         final_crs = target_crs
+         if not final_crs:
+             if all(is_within_conus(b, c) for b, c in zip(bounds_list, crs_list)):
+                 final_crs = "EPSG:5070"
+                 print(f"Defaulting to CONUS CRS: {final_crs}")
             else:
-                 print("All flood maps are not in the projected CRS or are not in the same projected CRS.\n")
-                 print("Please provide a target CRS in EPSG format.")
-     else:
-         for src_path in tif_files:
-             dst_path = processing_folder / src_path.name
-             shutil.copy(src_path, dst_path)
-
-     # Resolution check and resampling
-     processed_tifs = list(processing_folder.glob('*.tif'))
-     if processed_tifs:
-         resolutions = []
-         for tif_path in processed_tifs:
-             try:
-                 with rasterio.open(tif_path) as src:
-                     resolutions.append(src.res)
-             except rasterio.RasterioIOError as e:
-                 print(f"Error opening {tif_path} in processing folder: {e}")
+                 print(
+                     "Mixed or non-CONUS CRS detected. Please provide a valid target CRS."
+                 )
                  return

-         first_resolution = resolutions[0] if resolutions else None
-         all_same_resolution = all(res == first_resolution for res in resolutions)
+         print(f"Reprojecting all rasters to {final_crs}")
+         for src_path in tif_files:
+             dst_path = processing_folder / src_path.name
+             reprojectFIMs(str(src_path), str(dst_path), final_crs)
+             compress_tif_lzw(dst_path)

-         if not all_same_resolution:
-             if target_resolution is not None:
-                 for src_path in processed_tifs:
-                     resample_to_resolution(str(src_path), target_resolution, target_resolution)
-             else:
-                 coarser_x = max(res[0] for res in resolutions)
-                 coarser_y = max(res[1] for res in resolutions)
-                 print(f"Using coarser resolution: X={coarser_x}, Y={coarser_y}. Resampling all FIMS to this resolution.")
-                 for src_path in processed_tifs:
-                     resample_to_resolution(str(src_path), coarser_x, coarser_y)
-         else:
-             print("All FIMs in the processing folder have the same resolution.")
      else:
-         print("No TIFF files found in the processing folder after CRS standardization.")
+         print(
+             "All rasters are in the same projected CRS. Copying to processing folder."
+         )
+         for src_path in tif_files:
+             shutil.copy(src_path, processing_folder / src_path.name)
+
+     # Resolution Check & Resample if needed
+     processed_tifs = list(processing_folder.glob("*.tif"))
+     final_resolutions = []
+     for tif_path in processed_tifs:
+         with rasterio.open(tif_path) as src:
+             final_resolutions.append(src.res)
+
+     unique_res = set(final_resolutions)
+     if target_resolution:
+         print(f"Resampling all rasters to target resolution: {target_resolution}m.")
+         for src_path in processed_tifs:
+             resample_to_resolution(str(src_path), target_resolution, target_resolution)
+
+     # Otherwise, only resample if resolutions are inconsistent
+     elif len(unique_res) > 1:
+         coarsest_x = max(res[0] for res in final_resolutions)
+         coarsest_y = max(res[1] for res in final_resolutions)
+         print(f"Using coarsest resolution: X={coarsest_x}, Y={coarsest_y}")
+         for src_path in processed_tifs:
+             resample_to_resolution(str(src_path), coarsest_x, coarsest_y)
+     else:
+         print("All rasters already have the same resolution. No resampling needed.")
fimeval-0.1.53.dist-info/METADATA CHANGED
@@ -1,33 +1,29 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: fimeval
- Version: 0.1.51
+ Version: 0.1.53
  Summary: A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation
- License: GPLv3
  Author: Surface Dynamics Modeling Lab
- Maintainer: Supath Dhital
- Maintainer-email: sdhital@crimson.ua.edu
- Requires-Python: >=3.10,<4.0
- Classifier: License :: Other/Proprietary License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.10
- Classifier: Programming Language :: Python :: 3.11
- Classifier: Programming Language :: Python :: 3.12
- Classifier: Programming Language :: Python :: 3.13
- Requires-Dist: boto3 (>=1.36.16,<2.0.0)
- Requires-Dist: geopandas (>=1.0.1,<2.0.0)
- Requires-Dist: kaleido (==0.2.1)
- Requires-Dist: matplotlib (>=3.9.2,<4.0.0)
- Requires-Dist: msfootprint (>=0.1.27,<0.2.0)
- Requires-Dist: nbformat (>=5.10.4,<6.0.0)
- Requires-Dist: notebook (>=7.3.2,<8.0.0)
- Requires-Dist: numpy (<2)
- Requires-Dist: pathlib (>=1.0.1,<2.0.0)
- Requires-Dist: plotly (>=5.24.1,<6.0.0)
- Requires-Dist: pyproj (>=3.7.0,<4.0.0)
- Requires-Dist: pytest (>=8.3.3,<9.0.0)
- Requires-Dist: rasterio (>=1.4.2,<2.0.0)
- Requires-Dist: shapely (>=2.0.6,<3.0.0)
+ Author-email: Supath Dhital <sdhital@crimson.ua.edu>, Dipshika Devi <ddevi@ua.edu>
+ Maintainer-email: Supath Dhital <sdhital@crimson.ua.edu>, Dipshika Devi <ddevi@ua.edu>
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
+ License-File: LICENSE.txt
+ Requires-Dist: rasterio<2.0.0,>=1.4.2
+ Requires-Dist: numpy>=2
+ Requires-Dist: geopandas<2.0.0,>=1.0.1
+ Requires-Dist: shapely<3.0.0,>=2.0.6
+ Requires-Dist: matplotlib<4.0.0,>=3.9.2
+ Requires-Dist: plotly<6.0.0,>=5.24.1
+ Requires-Dist: kaleido==0.2.1
+ Requires-Dist: nbformat<6.0.0,>=5.10.4
+ Requires-Dist: pyproj<4.0.0,>=3.7.0
+ Requires-Dist: notebook<8.0.0,>=7.3.2
+ Requires-Dist: boto3<2.0.0,>=1.36.16
+ Requires-Dist: geemap
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == "dev"
+ Requires-Dist: black; extra == "dev"
+ Dynamic: license-file

  ## Flood Inundation Mapping Predictions Evaluation Framework (FIMeval)
  <hr style="border: 1px solid black; margin: 0;">
@@ -65,6 +61,7 @@ fimeval/
  ├── src/
  │ └── fimeval/
  │ ├──BuildingFootprint/ # Contains the evaluation of model predicted FIM with microsoft building footprint
+ │ │ └── microsoftBF.py
  │ │ └── evaluationwithBF.py
  │ └── ContingencyMap/ # Contains all the metrics calculation and contingency map generation
  │ │ ├── evaluationFIM.py # main evaluation moodule
@@ -90,7 +87,8 @@ This framework is published as a python package in PyPI (https://pypi.org/projec

  ```bash
  #Install to use this framework
- pip install fimeval
+ pip install uv #Makes the downloading much faster
+ uv pip install fimeval

  #Use this framework in your workflows using poetry
  poetry add fimeval
@@ -163,7 +161,7 @@ Table 1: Modules in `fimeval` are in order of execution.
  | `EvaluateFIM` | It runs all the evaluation of FIM between B-FIM and M-FIMs. | `main_dir`: Main directory containing the case study folders, <br> `method_name`: How users wants to evaluate their FIM, <br> `outpur_dir`: Output directory where all the results and the intermidiate files will be saved for further calculation, <br> *`PWB_dir`*: The permanenet water bodies vectory file directory if user wants to user their own boundary, <br> *`target_crs`*: this fimeval framework needs the floodmaps to be in projected CRS so define the projected CRS in epsg code format, <br> *`target_resolution`*: sometime if the benchmark is very high resolution than candidate FIMs, it needs heavy computational time, so user can define the resolution if there FIMs are in different spatial resolution, else it will use the coarser resolution among all FIMS within that case. |The outputs includes generated files in TIFF, SHP, CSV, and PNG formats, all stored within the output folder. Users can visualize the TIFF files using any geospatial platform. The TIFF files consist of the binary Benchmark-FIM (Benchmark.tif), Model-FIM (Candidate.tif), and Agreement-FIM (Contingency.tif). The shp files contain the boundary of the generated flood extent.|
  | `PlotContingencyMap` | For better understanding, It will print the agreement maps derived in first step. | `main_dir`, `method_name`, `output_dir` : Based on the those arguments, once all the evaluation is done, it will dynamically get the corresponding contingency raster for printing.| This prints the contingency map showing different class of evaluation (TP, FP, no data, PWB etc). The outputs look like- Figure 4 first row.|
  | `PlotEvaluationMetrics` | For quick understanding of the evaluation metrics, to plot bar of evaluation scores. | `main_dir`, `method_name`, `output_dir` : Based on the those arguments, once all the evaluation is done, it will dynamically get the corresponding file for printing based on all those info.| This prints the bar plots which includes different performance metrics calculated by EvaluateFIM module. The outputs look like- Figure 4 second row.|
- | `EvaluationWithBuildingFootprint` | For Building Footprint Analysis, user can specify shapefile of building footprints as .shp or .gpkg format. By default it consider global Microsoft building footprint dataset. Those data are hosted in Google Earth Engine (GEE) so, It pops up to authenticate the GEE account, please allow it and it will download the data based on evaluation boundary and evaluation is done. | `main_dir`, `method_name`, `output_dir`: Those arguments are as it is, same as all other modules. <br> *`building_footprint`*: If user wants to use their own building footprint file then pass the directory here, *`country`*: It is the 3 letter based country ISO code (eg. 'USA', NEP' etc), for the building data automation using GEE based on the evaluation extent, *`shapefile_dir`*: this is the directory of user defined AOI if user is working with their own boundary and automatic Building footprint download and evaluation. | It will calculate the different metrics (e.g. TP, FP, CSI, F1, Accuracy etc) based on hit and miss of building on different M-FIM and B-FIM. Those all metrics will be saved as CSV format in `output_dir` and finally using that info it prints the counts of building foorpint in each FIMs as well as scenario on the evaluation end via bar plot.|
+ | `EvaluationWithBuildingFootprint` | For Building Footprint Analysis, user can specify shapefile of building footprints as .shp or .gpkg format. By default it consider global Microsoft building footprint dataset. Those data are hosted in Google Earth Engine (GEE) so, It pops up to authenticate the GEE account, please allow it and it will download the data based on evaluation boundary and evaluation is done. | `main_dir`, `method_name`, `output_dir`: Those arguments are as it is, same as all other modules. <br> *`building_footprint`*: If user wants to use their own building footprint file then pass the directory here, *`country`*: It is the 3 letter based country ISO code (eg. 'USA', NEP' etc), for the building data automation using GEE based on the evaluation extent, *`shapefile_dir`*: this is the directory of user defined AOI if user is working with their own boundary and automatic Building footprint download and evaluation, *`geeprojectID`*: this is the google earth engine google cloud project ID, which helps to access the GEE data and resources to work with building footprint download and process. | It will calculate the different metrics (e.g. TP, FP, CSI, F1, Accuracy etc) based on hit and miss of building on different M-FIM and B-FIM. Those all metrics will be saved as CSV format in `output_dir` and finally using that info it prints the counts of building foorpint in each FIMs as well as scenario on the evaluation end via bar plot.|

  <p align="center">
  <img src="./Images/methodsresults_combined.jpg" width="750" />
@@ -171,6 +169,58 @@ Table 1: Modules in `fimeval` are in order of execution.

  Figure 4: Combined raw output from framework for different two method. First row (subplot a and b) and second row (subplot c and d) is contingency maps and evaluation metrics of FIM derived using `PrintContingencyMaP` and `PlotEvaluationMetrics` module. Third row (subplot e and f) is the output after processing and calculating of evaluation with BF by unsing `EvaluateWithBuildingFoorprint` module.

+ ## Installation Instructions
+
+ ### 1. Prerequisites
+
+ Before installing `fimeval`, ensure the following software are installed:
+
+ - **Python**: Version 3.10 or higher
+ - **Anaconda**: For managing environments and dependencies
+ - **GIS Software**: For Visulalisation
+   - [ArcGIS](https://www.esri.com/en-us/arcgis/products/index) or [QGIS](https://qgis.org/en/site/)
+ - **Optional**:
+   - [Google Earth Engine](https://earthengine.google.com/) account
+   - Java Runtime Environment (for using GEE API)
+
+ ---
+
+ ### 2. Install Anaconda
+
+ If Anaconda is not installed, download and install it from the [official website](https://www.anaconda.com/products/distribution).
+
+ ---
+
+ ### 3. Set Up Virtual Environment
+
+ #### For Mac Users
+
+ Open **Terminal** and run:
+ ```bash
+ # Create a new environment named 'fimeval'
+ conda create --name fimeval python=3.10
+
+ # Activate the environment
+ conda activate fimeval
+
+ # Install fimeval package
+ pip install uv
+ uv pip install fimeval
+ ```
+
+ ### Google Colab Version
+
+ To use fimeval in Google Colab, follow the steps below:
+
+ ## Upload Files
+ Upload all necessary input files (e.g., raster, shapefiles, model outputs) to your Google Drive.
+ ## Open Google Colab
+ Go to Google Colab and sign in with a valid Google account.
+ ## Mount Google Drive
+ In a new Colab notebook, mount the Google Drive
+ ```bash
+ pip install fimeval
+ ```
  ### **Acknowledgements**
  | | |
  | --- | --- |
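Editor's note on the added README section: the "Mount Google Drive" step's fenced block only shows `pip install fimeval`. The usual Colab mount call, for reference (standard `google.colab` API, not part of the packaged README):

```python
# Standard Colab drive mount; run this in a Colab notebook cell.
from google.colab import drive

drive.mount("/content/drive")
```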
@@ -181,4 +231,3 @@ Contact <a href="https://geography.ua.edu/people/sagy-cohen/" target="_blank">Sa
  (sagy.cohen@ua.edu)
  Dipsikha Devi, (ddevi@ua.edu)
  Supath Dhital, (sdhital@crimson.ua.edu)
-
fimeval-0.1.53.dist-info/RECORD ADDED
@@ -0,0 +1,17 @@
+ fimeval/__init__.py,sha256=HZJKq7XEhL6TnwFkhpf8NcEQ5h7zPQ3XJh3z5gF0gQ8,603
+ fimeval/utilis.py,sha256=JYpWTJx3WOuC0wRr3nb-cwMPf0SAGIli5dJdKFS23w8,6954
+ fimeval/BuildingFootprint/__init__.py,sha256=oP9YWLdo6ANzSQFxYLv7Ku_26AY5NkLNhZLK28ICMLo,109
+ fimeval/BuildingFootprint/evaluationwithBF.py,sha256=UjH_Fz38hiIa5NRSr5K7NobGTg-7cjfbzC68QmsmEeo,16050
+ fimeval/BuildingFootprint/microsoftBF.py,sha256=gpNX30apn7R48LsZbVyiCbbtKMjNAw3G-Xz7uPTFtrs,4062
+ fimeval/ContingencyMap/PWBs3.py,sha256=UFICxO58c2fA9mIffH4ooqphv3ZXe6yX8QzpRjtI6fs,1275
+ fimeval/ContingencyMap/__init__.py,sha256=ckps2dyg6aci3TA-3P7oTMcCAcSTz9AA6sndHtZEwdE,259
+ fimeval/ContingencyMap/evaluationFIM.py,sha256=IFmY7rCnqpyDVS5Qd6IyNzpt_wLv0po--Uy_PFsflZs,17016
+ fimeval/ContingencyMap/methods.py,sha256=kbutfo9FUH-yjvnOXxwLpdErUuebMJ8NjCroNWIYCjo,3299
+ fimeval/ContingencyMap/metrics.py,sha256=jwOia0Nl7aU7AuGJFAcQ4fVENnp2G_5W6JSJBzo1-_4,1094
+ fimeval/ContingencyMap/plotevaluationmetrics.py,sha256=3bKfPKZnMR39dA3teDVpQBeTFKnF9v_2Vku0JNVGggs,3921
+ fimeval/ContingencyMap/printcontingency.py,sha256=-1H_Ze2TbRSER7vy7bd0HvxnziNzPPOIPOm2YhB7r4A,5422
+ fimeval-0.1.53.dist-info/licenses/LICENSE.txt,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
+ fimeval-0.1.53.dist-info/METADATA,sha256=-VIziBiACuZ4-pZ7g87HXzl8xrq4TNOxtIo1QW8h_FU,16251
+ fimeval-0.1.53.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ fimeval-0.1.53.dist-info/top_level.txt,sha256=F4QW50msI8sRrX_DK3NQ-s3swQ4-2_5Ty3mfm9ZMc6k,8
+ fimeval-0.1.53.dist-info/RECORD,,
fimeval-0.1.53.dist-info/WHEEL CHANGED
@@ -1,4 +1,5 @@
  Wheel-Version: 1.0
- Generator: poetry-core 1.9.1
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
+
fimeval-0.1.53.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ fimeval
fimeval/ContingencyMap/fix_permissions.sh DELETED
@@ -1,23 +0,0 @@
- #!/bin/bash
-
- DIR="$1"
-
- if [ -z "$DIR" ]; then
-     echo "No directory provided."
-     exit 1
- fi
- echo "Fixing permissions for: $DIR"
-
- UNAME=$(uname)
- if [[ "$UNAME" == "Darwin" || "$UNAME" == "Linux" ]]; then
-     chmod -R u+rwX "$DIR"
-     echo "Permissions granted for user (u+rwX)"
-
- elif [[ "$UNAME" == *"MINGW"* || "$UNAME" == *"MSYS"* || "$UNAME" == *"CYGWIN"* ]]; then
-     icacls "$DIR" /grant Everyone:F /T > /dev/null
-     echo "Permissions granted for working folder"
-
- else
-     echo "Unsupported OS: $UNAME"
-     exit 1
- fi
fimeval-0.1.51.dist-info/RECORD DELETED
@@ -1,16 +0,0 @@
- fimeval/BuildingFootprint/__init__.py,sha256=oP9YWLdo6ANzSQFxYLv7Ku_26AY5NkLNhZLK28ICMLo,109
- fimeval/BuildingFootprint/evaluationwithBF.py,sha256=mnmcfyNPL_XBg9fIDXJjBWsRDKlCZ8HPjIhzCiFJkb8,14352
- fimeval/ContingencyMap/PWBs3.py,sha256=YAg03jzdplYIstG-pZM1MECse7gYjWrJNKAopjgt3uk,1294
- fimeval/ContingencyMap/__init__.py,sha256=ckps2dyg6aci3TA-3P7oTMcCAcSTz9AA6sndHtZEwdE,259
- fimeval/ContingencyMap/evaluationFIM.py,sha256=ZVoSAseQ_tb_WrY0JLZaTc9z3VCjMLt7iw_OY2-Gobc,16796
- fimeval/ContingencyMap/fix_permissions.sh,sha256=prIeJGXwAUO28nhgtCtvcpOxWK-J75rxN4FQ6QjGET4,508
- fimeval/ContingencyMap/methods.py,sha256=kbutfo9FUH-yjvnOXxwLpdErUuebMJ8NjCroNWIYCjo,3299
- fimeval/ContingencyMap/metrics.py,sha256=eEv1zAfmIjyg9OWM1b6-i25q_3jEBmeLZ7JeuvxS1QI,1070
- fimeval/ContingencyMap/plotevaluationmetrics.py,sha256=3bKfPKZnMR39dA3teDVpQBeTFKnF9v_2Vku0JNVGggs,3921
- fimeval/ContingencyMap/printcontingency.py,sha256=Ef0TlGNxvLlrliM2SCkhusgz9BsEGvVOBHAO62YC_QA,5421
- fimeval/__init__.py,sha256=kN114EvzG_BFjd65fKWXg29TqaWvR173EdCN3yj30oc,433
- fimeval/utilis.py,sha256=KNXcR0RvhT_lPqM_8cuAGXMpRtcLMfb_UDqUiMlDevs,7311
- fimeval-0.1.51.dist-info/LICENSE.txt,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
- fimeval-0.1.51.dist-info/METADATA,sha256=ic6uUVb5xRU0RyZMo-CUQ4YPjA_evv_DRBn_kmIqgIk,14848
- fimeval-0.1.51.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- fimeval-0.1.51.dist-info/RECORD,,