fimeval 0.1.52__py3-none-any.whl → 0.1.54__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,4 @@
  import os
- import ee
  import glob
  import geopandas as gpd
  import rasterio
@@ -31,7 +30,6 @@ def GetFloodedBuildingCountInfo(
  basename,
  ):
  output_dir = os.path.dirname(building_fp_path)
-
  building_fp_gpkg = Changeintogpkg(
  building_fp_path, output_dir, "building_footprint"
  )
@@ -42,7 +40,6 @@ def GetFloodedBuildingCountInfo(
  with rasterio.open(raster1_path) as src:
  target_crs = str(src.crs)

- # Reproject all GeoDataFrames to the target CRS
  if building_gdf.crs != target_crs:
  building_gdf = building_gdf.to_crs(target_crs)
  print("reproject building_gdf")
@@ -55,43 +52,32 @@ def GetFloodedBuildingCountInfo(
  clipped_buildings["centroid"] = clipped_buildings.geometry.centroid

  centroid_counts = {
- "Benchmark": 0,
- "Candidate": 0,
  "False Positive": 0,
  "False Negative": 0,
  "True Positive": 0,
  }

- def count_centroids_in_raster(raster_path, label):
+ # Count centroids in the contingency map
+ def count_centroids_in_contingency(raster_path):
  with rasterio.open(raster_path) as src:
  raster_data = src.read(1)
- transform = src.transform
-
  for centroid in clipped_buildings["centroid"]:
  row, col = src.index(centroid.x, centroid.y)
  if 0 <= row < raster_data.shape[0] and 0 <= col < raster_data.shape[1]:
  pixel_value = raster_data[row, col]
- if label in ["Benchmark", "Candidate"]:
- if pixel_value == 2: # False Positive
- centroid_counts[label] += 1
- else:
- if pixel_value == 2:
- centroid_counts["False Positive"] += 1
- elif pixel_value == 3:
- centroid_counts["False Negative"] += 1
- elif pixel_value == 4:
- centroid_counts["True Positive"] += 1
-
- if "bm" in str(raster1_path).lower():
- count_centroids_in_raster(raster1_path, "Benchmark")
- count_centroids_in_raster(raster2_path, "Candidate")
-
- elif "candidate" in str(raster2_path).lower():
- count_centroids_in_raster(raster1_path, "Candidate")
- count_centroids_in_raster(raster2_path, "Benchmark")
+ if pixel_value == 2:
+ centroid_counts["False Positive"] += 1
+ elif pixel_value == 3:
+ centroid_counts["False Negative"] += 1
+ elif pixel_value == 4:
+ centroid_counts["True Positive"] += 1
+
+ count_centroids_in_contingency(contingency_map)
+
+ # Calculate Candidate and Benchmark counts from the contingency map counts
+ centroid_counts["Candidate"] = centroid_counts["True Positive"] + centroid_counts["False Positive"]
+ centroid_counts["Benchmark"] = centroid_counts["True Positive"] + centroid_counts["False Negative"]

- if "contingency" in str(contingency_map).lower():
- count_centroids_in_raster(contingency_map, "Contingency")

  total_buildings = len(clipped_buildings)
  percentages = {
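The rewrite above replaces the old filename-sniffing (`"bm"` / `"candidate"` in the path) with a single pass over the contingency map, whose pixel classes match the legend used elsewhere in the package (2 false positive, 3 false negative, 4 true positive). A minimal sketch of the derived counts, with hypothetical numbers:

```python
# Sketch of the derivation above; the counts are hypothetical.
counts = {"False Positive": 12, "False Negative": 8, "True Positive": 80}

# Candidate-flooded buildings are those the candidate map flags: TP + FP.
counts["Candidate"] = counts["True Positive"] + counts["False Positive"]
# Benchmark-flooded buildings are those the benchmark map flags: TP + FN.
counts["Benchmark"] = counts["True Positive"] + counts["False Negative"]

assert counts["Candidate"] == 92 and counts["Benchmark"] == 88
```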
@@ -106,10 +92,15 @@ def GetFloodedBuildingCountInfo(
  CSI = TP / (TP + FP + FN) if (TP + FP + FN) > 0 else 0
  FAR = FP / (TP + FP) if (TP + FP) > 0 else 0
  POD = TP / (TP + FN) if (TP + FN) > 0 else 0
-
-
- BDR = (centroid_counts["Candidate"]- centroid_counts["Benchmark"])/centroid_counts["Benchmark"]

+ if centroid_counts["Benchmark"] > 0:
+ BDR = (
+ (centroid_counts["Candidate"] - centroid_counts["Benchmark"])
+ / centroid_counts["Benchmark"]
+ )
+ else:
+ BDR = 0
+
  counts_data = {
  "Category": [
  "Candidate",
@@ -223,6 +214,7 @@ def GetFloodedBuildingCountInfo(
  print(f"Performance metrics chart is saved as PNG at {output_path}")
  fig.show()

+
  def process_TIFF(
  tif_files, contingency_files, building_footprint, boundary, method_path
  ):
@@ -266,22 +258,49 @@ def process_TIFF(
  print("Warning: No benchmark file found.")
  elif not candidate_paths:
  print("Warning: No candidate files found.")
-
+
+
  def find_existing_footprint(out_dir):
  gpkg_files = list(Path(out_dir).glob("*.gpkg"))
  return gpkg_files[0] if gpkg_files else None

- #Incase user defined individual shapefile for each case study
+
+ # In case the user defined an individual shapefile for each case study
  def detect_shapefile(folder):
- shapefile = None
- for ext in (".shp", ".gpkg", ".geojson", ".kml"):
- for file in os.listdir(folder):
- if file.lower().endswith(ext):
- shapefile = os.path.join(folder, file)
- print(f"Auto-detected shapefile: {shapefile}")
- return shapefile
- return None
-
+ shapefile = None
+ for ext in (".shp", ".gpkg", ".geojson", ".kml"):
+ for file in os.listdir(folder):
+ if file.lower().endswith(ext):
+ shapefile = os.path.join(folder, file)
+ print(f"Auto-detected shapefile: {shapefile}")
+ return shapefile
+ return None
+
+
+ def ensure_pyspark(version: str | None = "3.5.4") -> None:
+ """Install pyspark at runtime via `uv pip` into this env (no-op if present)."""
+ import importlib, shutil, subprocess, sys, re
+
+ try:
+ import importlib.util
+
+ if importlib.util.find_spec("pyspark"):
+ return
+ except Exception:
+ pass
+ uv = shutil.which("uv")
+ if not uv:
+ raise RuntimeError(
+ "`uv` not found on PATH. Please install uv or add it to PATH."
+ )
+ if version is None:
+ spec = "pyspark"
+ else:
+ v = version.strip()
+ spec = f"pyspark{v}" if re.match(r"^[<>=!~]", v) else f"pyspark=={v}"
+ subprocess.check_call([uv, "pip", "install", "--python", sys.executable, spec])
+
+
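The regex branch in `ensure_pyspark` accepts either a bare version (pinned with `==`) or a full PEP 440 specifier (passed through unchanged). A standalone sketch of that spec-building logic with illustrative inputs:

```python
import re

def build_spec(version):
    # Mirrors ensure_pyspark: bare versions are pinned, operator-prefixed
    # specifiers (>=, ==, !=, ~=, <, >) are forwarded unchanged.
    if version is None:
        return "pyspark"
    v = version.strip()
    return f"pyspark{v}" if re.match(r"^[<>=!~]", v) else f"pyspark=={v}"

assert build_spec("3.5.4") == "pyspark==3.5.4"
assert build_spec(">=3.5") == "pyspark>=3.5"
assert build_spec(None) == "pyspark"
```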
  def EvaluationWithBuildingFootprint(
  main_dir,
  method_name,
@@ -289,6 +308,7 @@ def EvaluationWithBuildingFootprint(
  country=None,
  building_footprint=None,
  shapefile_dir=None,
+ geeprojectID=None,
  ):
  tif_files_main = glob.glob(os.path.join(main_dir, "*.tif"))
  if tif_files_main:
@@ -303,9 +323,7 @@ def EvaluationWithBuildingFootprint(

  if shapefile_dir:
  boundary = shapefile_dir
- elif os.path.exists(
- os.path.join(method_path, "BoundaryforEvaluation")
- ):
+ elif os.path.exists(os.path.join(method_path, "BoundaryforEvaluation")):
  boundary = os.path.join(
  method_path, "BoundaryforEvaluation", "FIMEvaluatedExtent.shp"
  )
@@ -313,8 +331,10 @@ def EvaluationWithBuildingFootprint(
  boundary = detect_shapefile(main_dir)

  building_footprintMS = building_footprint
+
  if building_footprintMS is None:
- import msfootprint as msf
+ ensure_pyspark()
+ from .microsoftBF import BuildingFootprintwithISO

  out_dir = os.path.join(method_path, "BuildingFootprint")
  if not os.path.exists(out_dir):
@@ -323,7 +343,15 @@ def EvaluationWithBuildingFootprint(
  if not EX_building_footprint:
  boundary_dir = shapefile_dir if shapefile_dir else boundary

- msf.BuildingFootprintwithISO(country, boundary_dir, out_dir)
+ if geeprojectID:
+ BuildingFootprintwithISO(
+ country,
+ boundary_dir,
+ out_dir,
+ geeprojectID=geeprojectID,
+ )
+ else:
+ BuildingFootprintwithISO(country, boundary_dir, out_dir)
  building_footprintMS = os.path.join(
  out_dir, f"building_footprint.gpkg"
  )
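Since the new `BuildingFootprintwithISO` (added below) already defaults `geeprojectID` to `None`, the branch is equivalent to a single keyword call; a sketch of the collapsed form:

```python
# Equivalent single call (sketch): geeprojectID=None matches the default,
# so the if/else above is functionally one statement.
BuildingFootprintwithISO(country, boundary_dir, out_dir, geeprojectID=geeprojectID)
```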
@@ -355,14 +383,18 @@ def EvaluationWithBuildingFootprint(
  os.path.join(method_path, "BoundaryforEvaluation")
  ):
  boundary = os.path.join(
- method_path, "BoundaryforEvaluation", "FIMEvaluatedExtent.shp"
+ method_path,
+ "BoundaryforEvaluation",
+ "FIMEvaluatedExtent.shp",
  )
  else:
  boundary = detect_shapefile(os.path.join(main_dir, folder))

  building_footprintMS = building_footprint
+
  if building_footprintMS is None:
- import msfootprint as msf
+ ensure_pyspark()
+ from .microsoftBF import BuildingFootprintwithISO

  out_dir = os.path.join(method_path, "BuildingFootprint")
  if not os.path.exists(out_dir):
@@ -372,9 +404,17 @@ def EvaluationWithBuildingFootprint(
  boundary_dir = (
  shapefile_dir if shapefile_dir else boundary
  )
- msf.BuildingFootprintwithISO(
- country, boundary_dir, out_dir
- )
+ if geeprojectID:
+ BuildingFootprintwithISO(
+ country,
+ boundary_dir,
+ out_dir,
+ geeprojectID=geeprojectID,
+ )
+ else:
+ BuildingFootprintwithISO(
+ country, boundary_dir, out_dir
+ )
  building_footprintMS = os.path.join(
  out_dir, f"building_footprint.gpkg"
  )
@@ -0,0 +1,132 @@
+ # Importing necessary libraries
+ import geemap
+ import ee
+ import os
+ from shapely.geometry import box
+ import pandas as pd
+ from pathlib import Path
+ import geopandas as gpd
+ from pyspark.sql import SparkSession
+ from shapely.wkt import loads
+ import shutil
+
+ # Suppress the warnings
+ import warnings
+
+ warnings.filterwarnings("ignore")
+
+ # Authenticate and initialize Earth Engine
+ ee.Authenticate()
+
+
+ # %%
+ def split_into_tiles(boundary, tile_size=0.1):
+ bounds = boundary.total_bounds
+ x_min, y_min, x_max, y_max = bounds
+ tiles = []
+ x = x_min
+ while x < x_max:
+ y = y_min
+ while y < y_max:
+ tile = box(x, y, x + tile_size, y + tile_size)
+ if tile.intersects(boundary.unary_union):
+ tiles.append(tile)
+ y += tile_size
+ x += tile_size
+ return gpd.GeoDataFrame(geometry=tiles, crs=boundary.crs)
+
+
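`split_into_tiles` covers the boundary's bounding box with a regular grid and keeps only the tiles that intersect the geometry, so each Earth Engine request stays small. A quick sanity check, assuming the function is importable from this new module:

```python
import geopandas as gpd
from shapely.geometry import box

# A 0.2 x 0.2 degree square with tile_size=0.1 should produce a 2x2 grid.
square = gpd.GeoDataFrame(geometry=[box(0, 0, 0.2, 0.2)], crs="EPSG:4326")
tiles = split_into_tiles(square, tile_size=0.1)
print(len(tiles))  # 4
```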
+ # Merge the final geojson files
+ def mergeGeoJSONfiles(output_dir, merged_file):
+ output_dir = Path(output_dir)
+ files = list(output_dir.glob("*.geojson"))
+ gdfs = [gpd.read_file(file) for file in files]
+ merged_gdf = gpd.GeoDataFrame(pd.concat(gdfs, ignore_index=True), crs="EPSG:4326")
+ merged_gdf.to_file(merged_file, driver="GPKG")
+
+
+ # Process each batch with number of tiles
+ def process_batch(partition, collection_name, output_dir, boundary_wkt, projectID=None):
+ try:
+ if projectID:
+ ee.Initialize(project=projectID)
+ else:
+ ee.Initialize()
+
+ except Exception:
+ print("To initialize, please provide the earth engine project ID")
+
+ # Convert WKT boundary to geometry
+ boundary = loads(boundary_wkt)
+ results = []
+
+ for tile_wkt in partition:
+ try:
+ tile = loads(tile_wkt)
+ aoi = ee.Geometry(tile.__geo_interface__)
+ collection = ee.FeatureCollection(collection_name).filterBounds(aoi)
+
+ # Download features and filter by boundary
+ gdf = geemap.ee_to_gdf(collection)
+ gdf = gdf[gdf.geometry.intersects(boundary)]
+
+ # Save each tile as a GeoJSON file
+ tile_id = f"tile_{hash(tile)}"
+ output_file = Path(output_dir) / f"{tile_id}.geojson"
+ gdf.to_file(output_file, driver="GeoJSON")
+ results.append(f"Saved: {output_file}")
+ except Exception as e:
+ results.append(f"Error processing tile: {e}")
+
+ return results
+
+
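`process_batch` is written to run inside `mapPartitions`, so each Spark executor re-creates its own Earth Engine session (the client is not serializable) and consumes an iterator of tile WKTs. A minimal, self-contained sketch of that pattern without the Earth Engine calls:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("demo").getOrCreate()

def handle_partition(tile_wkts):
    # Per-partition setup (e.g., ee.Initialize()) would go here, once per
    # executor task rather than once per tile.
    return [f"processed {wkt}" for wkt in tile_wkts]

rdd = spark.sparkContext.parallelize(["POINT (0 0)", "POINT (1 1)"], numSlices=2)
print(rdd.mapPartitions(handle_partition).collect())
```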
+ def getBuildingFootprintSpark(
+ countryISO, boundary_file, out_dir, tile_size, projectID=None
+ ):
+ spark = SparkSession.builder.appName("BuildingFootprints").getOrCreate()
+
+ # Make temporary directory
+ temp_dir = out_dir / "temp"
+ temp_dir.mkdir(parents=True, exist_ok=True)
+
+ # Load and process boundary
+ boundary = gpd.read_file(boundary_file).to_crs("EPSG:4326")
+ tiles = split_into_tiles(boundary, tile_size)
+ boundary_wkt = boundary.unary_union.wkt
+
+ collection_names = [f"projects/sat-io/open-datasets/VIDA_COMBINED/{countryISO}"]
+
+ # Distribute processing
+ for collection_name in collection_names:
+ tiles_rdd = spark.sparkContext.parallelize(
+ tiles.geometry.apply(lambda x: x.wkt).tolist(), numSlices=10
+ )
+ results = tiles_rdd.mapPartitions(
+ lambda partition: process_batch(
+ partition, collection_name, str(temp_dir), boundary_wkt, projectID
+ )
+ ).collect()
+
+ # Merge GeoJSON files
+ mergeGeoJSONfiles(temp_dir, out_dir / "building_footprint.gpkg")
+
+ # Clean up the temp directory
+ shutil.rmtree(temp_dir, ignore_errors=True)
+
+ print(f"Building footprint data saved to {out_dir / 'building_footprint.gpkg'}")
+
+
+ # %%
+ # Export the building footprint
+ def BuildingFootprintwithISO(countryISO, ROI, out_dir, geeprojectID=None):
+ out_dir = Path(out_dir)
+ out_dir.mkdir(parents=True, exist_ok=True)
+ filename = out_dir / "building_footprint.gpkg"
+
+ if filename.exists():
+ os.remove(filename)
+
+ getBuildingFootprintSpark(
+ countryISO, ROI, out_dir, tile_size=0.05, projectID=geeprojectID
+ )
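`BuildingFootprintwithISO` is the module's entry point: it clears any stale output, tiles the ROI at 0.05 degrees, and writes `building_footprint.gpkg`. A hedged usage sketch; the ISO code, paths, and project ID are placeholders:

```python
# Hypothetical call: fetch VIDA footprints for Bangladesh ("BGD") clipped to
# an AOI shapefile; requires prior ee.Authenticate() and a GEE cloud project.
BuildingFootprintwithISO(
    countryISO="BGD",
    ROI="AOI/FIMEvaluatedExtent.shp",
    out_dir="BuildingFootprint",
    geeprojectID="my-ee-project",  # placeholder project ID
)
```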
@@ -1,4 +1,4 @@
- #import Libraries
+ # import Libraries
  import geopandas as gpd
  import boto3
  import botocore
@@ -7,27 +7,27 @@ import tempfile

  # Initialize an anonymous S3 client
  s3 = boto3.client(
- 's3',
- config=botocore.config.Config(signature_version=botocore.UNSIGNED)
+ "s3", config=botocore.config.Config(signature_version=botocore.UNSIGNED)
  )

- bucket_name = 'sdmlab'
- pwb_folder = "PWB/"
+ bucket_name = "sdmlab"
+ pwb_folder = "PWB/"
+

  def PWB_inS3(s3_client, bucket, prefix):
  """Download all components of a shapefile from S3 into a temporary directory."""
  tmp_dir = tempfile.mkdtemp()
  response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix)
- if 'Contents' not in response:
+ if "Contents" not in response:
  raise ValueError("No files found in the specified S3 folder.")
-
- for obj in response['Contents']:
- file_key = obj['Key']
- file_name = os.path.basename(file_key)
- if file_name.endswith(('.shp', '.shx', '.dbf', '.prj', '.cpg')):
+
+ for obj in response["Contents"]:
+ file_key = obj["Key"]
+ file_name = os.path.basename(file_key)
+ if file_name.endswith((".shp", ".shx", ".dbf", ".prj", ".cpg")):
  local_path = os.path.join(tmp_dir, file_name)
  s3_client.download_file(bucket, file_key, local_path)
-
+
  shp_files = [f for f in os.listdir(tmp_dir) if f.endswith(".shp")]
  if not shp_files:
  raise ValueError("No .shp file found after download.")
@@ -35,7 +35,8 @@ def PWB_inS3(s3_client, bucket, prefix):
  shp_path = os.path.join(tmp_dir, shp_files[0])
  return shp_path

+
  def get_PWB():
  shp_path = PWB_inS3(s3, bucket_name, pwb_folder)
  pwb = gpd.read_file(shp_path)
- return pwb
+ return pwb
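Because the client is created with `botocore.UNSIGNED`, no AWS credentials are needed; `get_PWB` pulls the `PWB/` shapefile parts from the public `sdmlab` bucket and loads them. A brief usage sketch:

```python
# Downloads the permanent-water-body shapefile anonymously and loads it.
pwb = get_PWB()
print(pwb.crs, len(pwb))
```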
@@ -21,28 +21,45 @@ from .metrics import evaluationmetrics
  from .PWBs3 import get_PWB
  from ..utilis import MakeFIMsUniform

- #giving the permission to the folder
+
+ # giving the permission to the folder
  def is_writable(path):
  """Check if the directory and its contents are writable."""
  path = Path(path)
  return os.access(path, os.W_OK)

+
  def fix_permissions(path):
  path = Path(path).resolve()
- script_path = Path(__file__).parent / "fix_permissions.sh"
-
- if not script_path.exists():
- raise FileNotFoundError(f"Shell script not found: {script_path}")

  if is_writable(path):
- return
+ return
+
+ uname = platform.system()

  try:
- result = subprocess.run(["bash", str(script_path), str(path)],
- check=True, capture_output=True, text=True)
- print(result.stdout)
+ if uname in ["Darwin", "Linux"]:
+ subprocess.run(
+ ["chmod", "-R", "u+rwX", str(path)],
+ check=True,
+ capture_output=True,
+ text=True,
+ )
+ print(f"Permissions granted for user (u+rwX): {path}")
+
+ elif "MINGW" in uname or "MSYS" in uname or "CYGWIN" in uname:
+ subprocess.run(
+ ["icacls", str(path), "/grant", "Everyone:F", "/T"],
+ check=True,
+ capture_output=True,
+ text=True,
+ )
+ print(f"Permissions granted for working folder: {path}")
+
+ else:
+ print(f"Unsupported OS: {uname}")
  except subprocess.CalledProcessError as e:
- print(f"Shell script failed:\n{e.stderr}")
+ print(f"Failed to fix permissions for {path}:\n{e.stderr}")


  # Function for the evaluation of the model
@@ -79,7 +96,7 @@ def evaluateFIM(
  # Get the smallest matched raster extent and make a boundary shapefile
  smallest_raster_path = get_smallest_raster_path(benchmark_path, *candidate_paths)

- #If method is AOI, and direct shapefile directory is not provided, then it will search for the shapefile in the folder
+ # If method is AOI, and direct shapefile directory is not provided, then it will search for the shapefile in the folder
  if method.__name__ == "AOI":
  # If shapefile is not provided, search in the folder
  if shapefile is None:
@@ -358,7 +375,8 @@ def evaluateFIM(
  print(f"Evaluation metrics saved to {csv_file}")
  return results

- #Safely deleting the folder
+
+ # Safely deleting the folder
  def safe_delete_folder(folder_path):
  fix_permissions(folder_path)
  try:
@@ -370,19 +388,28 @@ def safe_delete_folder(folder_path):
  except Exception as e:
  print(f"Error deleting {folder_path}: {e}")

- def EvaluateFIM(main_dir, method_name, output_dir, PWB_dir=None, shapefile_dir=None, target_crs=None, target_resolution=None):
+
+ def EvaluateFIM(
+ main_dir,
+ method_name,
+ output_dir,
+ PWB_dir=None,
+ shapefile_dir=None,
+ target_crs=None,
+ target_resolution=None,
+ ):
  main_dir = Path(main_dir)
  # Read the permanent water bodies
  if PWB_dir is None:
  gdf = get_PWB()
  else:
  gdf = gpd.read_file(PWB_dir)
-
- #Grant the permission to the main directory
+
+ # Grant the permission to the main directory
  print(f"Fixing permissions for {main_dir}...")
  fix_permissions(main_dir)

- #runt the process
+ # run the process
  def process_TIFF(tif_files, folder_dir):
  benchmark_path = None
  candidate_path = []
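The reformatted signature also reads better at the call site. A hedged usage sketch of `EvaluateFIM`; the directory names, method name, and CRS are placeholders:

```python
from fimeval import EvaluateFIM

EvaluateFIM(
    main_dir="CaseStudies",       # folder(s) of benchmark/candidate GeoTIFFs
    method_name="AOI",            # evaluation method (placeholder)
    output_dir="Results",
    target_crs="EPSG:5070",       # optional: FIMs reprojected before comparison
    target_resolution=10,         # optional: resampling resolution (placeholder)
)
```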
@@ -422,7 +449,9 @@ def EvaluateFIM(main_dir, method_name, output_dir, PWB_dir=None, shapefile_dir=N
  # Check if main_dir directly contains tif files
  TIFFfiles_main_dir = list(main_dir.glob("*.tif"))
  if TIFFfiles_main_dir:
- MakeFIMsUniform(main_dir, target_crs=target_crs, target_resolution=target_resolution)
+ MakeFIMsUniform(
+ main_dir, target_crs=target_crs, target_resolution=target_resolution
+ )

  # processing folder
  processing_folder = main_dir / "processing"
@@ -434,15 +463,20 @@ def EvaluateFIM(main_dir, method_name, output_dir, PWB_dir=None, shapefile_dir=N
  for folder in main_dir.iterdir():
  if folder.is_dir():
  tif_files = list(folder.glob("*.tif"))
-
+
  if tif_files:
- MakeFIMsUniform(folder, target_crs=target_crs, target_resolution=target_resolution)
-
+ MakeFIMsUniform(
+ folder,
+ target_crs=target_crs,
+ target_resolution=target_resolution,
+ )
+
  processing_folder = folder / "processing"
  TIFFfiles = list(processing_folder.glob("*.tif"))

  process_TIFF(TIFFfiles, folder)
  safe_delete_folder(processing_folder)
  else:
- print(f"Skipping {folder.name} as it doesn't contain any tif files.")
-
+ print(
+ f"Skipping {folder.name} as it doesn't contain any tif files."
+ )
@@ -7,21 +7,21 @@ def evaluationmetrics(out_image1, out_image2):
  unique_values, counts = np.unique(merged, return_counts=True)
  class_pixel_counts = dict(zip(unique_values, counts))
  class_pixel_counts
- TN = class_pixel_counts.get(1,0)
- FP = class_pixel_counts.get(2,0)
- FN = class_pixel_counts.get(3,0)
- TP = class_pixel_counts.get(4,0)
+ TN = class_pixel_counts.get(1, 0)
+ FP = class_pixel_counts.get(2, 0)
+ FN = class_pixel_counts.get(3, 0)
+ TP = class_pixel_counts.get(4, 0)
  epsilon = 1e-8
- TPR = TP / (TP + FN+epsilon)
- FNR = FN / (TP + FN+epsilon)
- Acc = (TP + TN) / (TP + TN + FP + FN+epsilon)
- Prec = TP / (TP + FP+epsilon)
- sen = TP / (TP + FN+epsilon)
- F1_score = 2 * (Prec * sen) / (Prec + sen+epsilon)
- CSI = TP / (TP + FN + FP+epsilon)
- POD = TP / (TP + FN+epsilon)
- FPR = FP / (FP + TN+epsilon)
- FAR = FP / (TP + FP+epsilon)
+ TPR = TP / (TP + FN + epsilon)
+ FNR = FN / (TP + FN + epsilon)
+ Acc = (TP + TN) / (TP + TN + FP + FN + epsilon)
+ Prec = TP / (TP + FP + epsilon)
+ sen = TP / (TP + FN + epsilon)
+ F1_score = 2 * (Prec * sen) / (Prec + sen + epsilon)
+ CSI = TP / (TP + FN + FP + epsilon)
+ POD = TP / (TP + FN + epsilon)
+ FPR = FP / (FP + TN + epsilon)
+ FAR = FP / (TP + FP + epsilon)

  return (
  unique_values,
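Beyond the cosmetic spacing, note that every denominator carries `epsilon = 1e-8`, so empty classes yield 0 rather than a ZeroDivisionError, at the cost of a negligible bias. A quick check with a toy confusion matrix (hypothetical counts):

```python
TP, FP, FN, TN = 80, 12, 8, 900  # hypothetical pixel counts
epsilon = 1e-8
CSI = TP / (TP + FN + FP + epsilon)  # critical success index
POD = TP / (TP + FN + epsilon)       # probability of detection
FAR = FP / (TP + FP + epsilon)       # false alarm ratio
print(round(CSI, 2), round(POD, 2), round(FAR, 2))  # 0.8 0.91 0.13
```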
@@ -65,7 +65,7 @@ def getContingencyMap(raster_path, method_path):
  2: "False positive",
  3: "False negative",
  4: "True positive",
- 5: "Permanent water bodies"
+ 5: "Permanent water bodies",
  }
  legend_patches = [
  Patch(
fimeval/__init__.py CHANGED
@@ -1,11 +1,20 @@
- #Evaluation modules
+ # Evaluation modules
  from .ContingencyMap.evaluationFIM import EvaluateFIM
  from .ContingencyMap.printcontingency import PrintContingencyMap
  from .ContingencyMap.plotevaluationmetrics import PlotEvaluationMetrics
  from .ContingencyMap.PWBs3 import get_PWB

- #Utility modules
+ # Utility modules
  from .utilis import compress_tif_lzw

  # Evaluation with Building footprint module
  from .BuildingFootprint.evaluationwithBF import EvaluationWithBuildingFootprint
+
+ __all__ = [
+ "EvaluateFIM",
+ "PrintContingencyMap",
+ "PlotEvaluationMetrics",
+ "get_PWB",
+ "EvaluationWithBuildingFootprint",
+ "compress_tif_lzw",
+ ]
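With `__all__` in place, star imports expose only the documented entry points; the explicit equivalent is:

```python
from fimeval import (
    EvaluateFIM,
    PrintContingencyMap,
    PlotEvaluationMetrics,
    get_PWB,
    EvaluationWithBuildingFootprint,
    compress_tif_lzw,
)
```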