fimeval 0.1.55__py3-none-any.whl → 0.1.57__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,5 @@
1
1
  import os
2
+ import re
2
3
  import numpy as np
3
4
  from pathlib import Path
4
5
  import geopandas as gpd
@@ -12,6 +13,8 @@ from rasterio.io import MemoryFile
12
13
  from rasterio import features
13
14
  from rasterio.mask import mask
14
15
 
16
+ os.environ["CHECK_DISK_FREE_SPACE"] = "NO"
17
+
15
18
  import warnings
16
19
 
17
20
  warnings.filterwarnings("ignore", category=rasterio.errors.ShapeSkipWarning)
@@ -19,7 +22,8 @@ warnings.filterwarnings("ignore", category=rasterio.errors.ShapeSkipWarning)
19
22
  from .methods import AOI, smallest_extent, convex_hull, get_smallest_raster_path
20
23
  from .metrics import evaluationmetrics
21
24
  from .PWBs3 import get_PWB
22
- from ..utilis import MakeFIMsUniform
25
+ from ..utilis import MakeFIMsUniform, benchmark_name, find_best_boundary
26
+ from ..setup_benchFIM import ensure_benchmark
23
27
 
24
28
 
25
29
  # giving the permission to the folder
@@ -98,20 +102,18 @@ def evaluateFIM(
98
102
 
99
103
  # If method is AOI, and direct shapefile directory is not provided, then it will search for the shapefile in the folder
100
104
  if method.__name__ == "AOI":
101
- # If shapefile is not provided, search in the folder
105
+ # Use best-matching boundary file, prefer .gpkg from benchFIM downloads
102
106
  if shapefile is None:
103
- for ext in (".shp", ".gpkg", ".geojson", ".kml"):
104
- for file in os.listdir(folder):
105
- if file.lower().endswith(ext):
106
- shapefile = os.path.join(folder, file)
107
- print(f"Auto-detected shapefile: {shapefile}")
108
- break
109
- if shapefile:
110
- break
111
- if shapefile is None:
107
+ shapefile_path = find_best_boundary(Path(folder), Path(benchmark_path))
108
+ if shapefile_path is None:
112
109
  raise FileNotFoundError(
113
- "No shapefile (.shp, .gpkg, .geojson, .kml) found in the folder and none provided. Either provide a shapefile directory or put shapefile inside folder directory."
110
+ f"No boundary file (.gpkg, .shp, .geojson, .kml) found in {folder}. "
111
+ "Either provide a shapefile path or place a boundary file in the folder."
114
112
  )
113
+ shapefile = str(shapefile_path)
114
+ else:
115
+ shapefile = str(shapefile)
116
+
115
117
  # Run AOI with the found or provided shapefile
116
118
  bounding_geom = AOI(benchmark_path, shapefile, save_dir)
117
119
 
@@ -277,6 +279,7 @@ def evaluateFIM(
277
279
  out_transform1,
278
280
  )
279
281
  merged = out_image1 + out_image2_resized
282
+ merged[merged == 7] = 5
280
283
 
281
284
  # Get Evaluation Metrics
282
285
  (
@@ -391,13 +394,17 @@ def safe_delete_folder(folder_path):
391
394
 
392
395
  def EvaluateFIM(
393
396
  main_dir,
394
- method_name,
395
- output_dir,
397
+ method_name=None,
398
+ output_dir=None,
396
399
  PWB_dir=None,
397
400
  shapefile_dir=None,
398
401
  target_crs=None,
399
402
  target_resolution=None,
403
+ benchmark_dict=None,
400
404
  ):
405
+ if output_dir is None:
406
+ output_dir = os.path.join(os.getcwd(), "Evaluation_Results")
407
+
401
408
  main_dir = Path(main_dir)
402
409
  # Read the permanent water bodies
403
410
  if PWB_dir is None:
@@ -413,32 +420,46 @@ def EvaluateFIM(
413
420
  benchmark_path = None
414
421
  candidate_path = []
415
422
 
416
- if len(tif_files) == 2:
417
- for tif_file in tif_files:
418
- if "benchmark" in tif_file.name.lower() or "BM" in tif_file.name:
419
- benchmark_path = tif_file
420
- else:
421
- candidate_path.append(tif_file)
423
+ for tif_file in tif_files:
424
+ if benchmark_name(tif_file):
425
+ benchmark_path = tif_file
426
+ else:
427
+ candidate_path.append(tif_file)
422
428
 
423
- elif len(tif_files) > 2:
424
- for tif_file in tif_files:
425
- if "benchmark" in tif_file.name.lower() or "BM" in tif_file.name:
426
- benchmark_path = tif_file
429
+ if benchmark_path and candidate_path:
430
+ if method_name is None:
431
+ local_method = "AOI"
432
+
433
+ # For a single case, if the user has explicitly sent a boundary, use that; else use the boundary from the benchmark FIM evaluation
434
+ if shapefile_dir is not None:
435
+ local_shapefile = shapefile_dir
427
436
  else:
428
- candidate_path.append(tif_file)
437
+ boundary = find_best_boundary(folder_dir, benchmark_path)
438
+ if boundary is None:
439
+ print(
440
+ f"Skipping {folder_dir.name}: no boundary file found "
441
+ f"and method_name is None (auto-AOI)."
442
+ )
443
+ return
444
+ local_shapefile = str(boundary)
445
+ else:
446
+ local_method = method_name
447
+ local_shapefile = shapefile_dir
429
448
 
430
- if benchmark_path and candidate_path:
431
449
  print(f"**Flood Inundation Evaluation of {folder_dir.name}**")
432
- Metrics = evaluateFIM(
433
- benchmark_path,
434
- candidate_path,
435
- gdf,
436
- folder_dir,
437
- method_name,
438
- output_dir,
439
- shapefile_dir,
440
- )
441
- print("\n", Metrics, "\n")
450
+ try:
451
+ Metrics = evaluateFIM(
452
+ benchmark_path,
453
+ candidate_path,
454
+ gdf,
455
+ folder_dir,
456
+ local_method,
457
+ output_dir,
458
+ shapefile=local_shapefile,
459
+ )
460
+ print("\n", Metrics, "\n")
461
+ except Exception as e:
462
+ print(f"Error evaluating {folder_dir.name}: {e}")
442
463
  else:
443
464
  print(
444
465
  f"Skipping {folder_dir.name} as it doesn't have a valid benchmark and candidate configuration."
@@ -447,34 +468,54 @@ def EvaluateFIM(
447
468
  # Check if main_dir directly contains tif files
448
469
  TIFFfiles_main_dir = list(main_dir.glob("*.tif"))
449
470
  if TIFFfiles_main_dir:
450
- MakeFIMsUniform(
451
- main_dir, target_crs=target_crs, target_resolution=target_resolution
471
+
472
+ # Ensure benchmark is present if needed
473
+ TIFFfiles_main_dir = ensure_benchmark(
474
+ main_dir, TIFFfiles_main_dir, benchmark_dict
452
475
  )
453
476
 
454
- # processing folder
455
477
  processing_folder = main_dir / "processing"
456
- TIFFfiles = list(processing_folder.glob("*.tif"))
478
+ try:
479
+ MakeFIMsUniform(
480
+ main_dir, target_crs=target_crs, target_resolution=target_resolution
481
+ )
457
482
 
458
- process_TIFF(TIFFfiles, main_dir)
459
- safe_delete_folder(processing_folder)
483
+ # processing folder
484
+ TIFFfiles = list(processing_folder.glob("*.tif"))
485
+
486
+ process_TIFF(TIFFfiles, main_dir)
487
+ except Exception as e:
488
+ print(f"Error processing {main_dir}: {e}")
489
+ finally:
490
+ safe_delete_folder(processing_folder)
460
491
  else:
461
492
  for folder in main_dir.iterdir():
462
493
  if folder.is_dir():
463
494
  tif_files = list(folder.glob("*.tif"))
464
495
 
465
496
  if tif_files:
466
- MakeFIMsUniform(
467
- folder,
468
- target_crs=target_crs,
469
- target_resolution=target_resolution,
470
- )
497
+ processing_folder = folder / "processing"
498
+ try:
499
+ # Ensure benchmark is present if needed
500
+ tif_files = ensure_benchmark(
501
+ folder, tif_files, benchmark_dict
502
+ )
503
+
504
+ MakeFIMsUniform(
505
+ folder,
506
+ target_crs=target_crs,
507
+ target_resolution=target_resolution,
508
+ )
471
509
 
472
- processing_folder = folder / "processing"
473
- TIFFfiles = list(processing_folder.glob("*.tif"))
510
+ TIFFfiles = list(processing_folder.glob("*.tif"))
474
511
 
475
- process_TIFF(TIFFfiles, folder)
476
- safe_delete_folder(processing_folder)
512
+ process_TIFF(TIFFfiles, folder)
513
+ except Exception as e:
514
+ print(f"Error processing folder {folder.name}: {e}")
515
+ finally:
516
+ safe_delete_folder(processing_folder)
477
517
  else:
478
518
  print(
479
519
  f"Skipping {folder.name} as it doesn't contain any tif files."
480
520
  )
521
+
@@ -12,19 +12,21 @@ def PlotMetrics(csv_path, method_path):
12
12
 
13
13
  # Keep only the desired metrics
14
14
  metrics = metrics_df.loc[
15
- metrics_df["Metrics"].isin([
16
- "CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"
17
- ])
15
+ metrics_df["Metrics"].isin(
16
+ ["CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"]
17
+ )
18
18
  ].copy()
19
19
 
20
20
  # Rename for presentation
21
- metrics["Metrics"] = metrics["Metrics"].replace({
22
- "CSI_values": "CSI",
23
- "POD_values": "POD",
24
- "Acc_values": "Accuracy",
25
- "Prec_values": "Precision",
26
- "F1_values": "F1 Score",
27
- })
21
+ metrics["Metrics"] = metrics["Metrics"].replace(
22
+ {
23
+ "CSI_values": "CSI",
24
+ "POD_values": "POD",
25
+ "Acc_values": "Accuracy",
26
+ "Prec_values": "Precision",
27
+ "F1_values": "F1 Score",
28
+ }
29
+ )
28
30
 
29
31
  value_columns = metrics.select_dtypes(include="number").columns
30
32
 
@@ -44,7 +46,7 @@ def PlotMetrics(csv_path, method_path):
44
46
  color="Metrics",
45
47
  orientation="h",
46
48
  color_discrete_sequence=px.colors.qualitative.Set2,
47
- title=f"Performance Metrics"
49
+ title=f"Performance Metrics",
48
50
  )
49
51
  fig_plotly.update_traces(texttemplate="%{text:.2f}", textposition="outside")
50
52
  fig_plotly.update_layout(
@@ -67,34 +69,36 @@ def PlotMetrics(csv_path, method_path):
67
69
  data=metrics,
68
70
  x=value_column,
69
71
  y="Metrics",
70
- hue="Metrics",
72
+ hue="Metrics",
71
73
  palette="Set2",
72
74
  ax=ax,
73
75
  dodge=False,
74
- legend=False
76
+ legend=False,
75
77
  )
76
78
 
77
79
  # Annotate bars
78
80
  for container in ax.containers:
79
- ax.bar_label(container, fmt='%.2f', label_type='edge', fontsize=14)
81
+ ax.bar_label(container, fmt="%.2f", label_type="edge", fontsize=14)
80
82
 
81
83
  # Styling
82
84
  ax.set_title("Performance Metrics", fontsize=16)
83
- ax.set_xlabel("Score", fontsize=16, color="black") # just bigger, not bold
85
+ ax.set_xlabel("Score", fontsize=16, color="black") # just bigger, not bold
84
86
  ax.set_ylabel("Metrics", fontsize=16, color="black")
85
87
 
86
- ax.set_xticks([i/10 for i in range(0, 11, 2)])
87
- ax.set_xticklabels([f"{i/10:.1f}" for i in range(0, 11, 2)], fontsize=14, color="black")
88
+ ax.set_xticks([i / 10 for i in range(0, 11, 2)])
89
+ ax.set_xticklabels(
90
+ [f"{i/10:.1f}" for i in range(0, 11, 2)], fontsize=14, color="black"
91
+ )
88
92
 
89
93
  # Increase y-tick label font size
90
94
  ax.tick_params(axis="y", labelsize=12, colors="black")
91
95
  ax.tick_params(axis="x", labelsize=14, colors="black")
92
96
 
93
97
  # Force spines black + thicker
94
- ax.spines['left'].set_linewidth(1.5)
95
- ax.spines['bottom'].set_linewidth(1.5)
96
- ax.spines['left'].set_color("black")
97
- ax.spines['bottom'].set_color("black")
98
+ ax.spines["left"].set_linewidth(1.5)
99
+ ax.spines["bottom"].set_linewidth(1.5)
100
+ ax.spines["left"].set_color("black")
101
+ ax.spines["bottom"].set_color("black")
98
102
 
99
103
  sns.despine(right=True, top=True)
100
104
 
fimeval/__init__.py CHANGED
@@ -10,6 +10,9 @@ from .utilis import compress_tif_lzw
10
10
  # Evaluation with Building foorprint module
11
11
  from .BuildingFootprint.evaluationwithBF import EvaluationWithBuildingFootprint
12
12
 
13
+ #Access benchmark FIM module
14
+ from .BenchFIMQuery.access_benchfim import benchFIMquery
15
+
13
16
  __all__ = [
14
17
  "EvaluateFIM",
15
18
  "PrintContingencyMap",
@@ -17,4 +20,5 @@ __all__ = [
17
20
  "get_PWB",
18
21
  "EvaluationWithBuildingFootprint",
19
22
  "compress_tif_lzw",
23
+ "benchFIMquery",
20
24
  ]
@@ -0,0 +1,39 @@
1
+ """
2
+ This code sets up all the case folders: whether each has a valid benchmark FIM, which benchmark needs to be accessed from the catalog, and so on.
3
+ Basically, it does everything needed before going into the actual evaluation process.
4
+ Author: Supath Dhital
5
+ Date updated: 25 Nov, 2025
6
+ """
7
+ from pathlib import Path
8
+
9
+ from .BenchFIMQuery.access_benchfim import benchFIMquery
10
+ from .utilis import benchmark_name
11
+
12
+ def ensure_benchmark(folder_dir, tif_files, benchmark_map):
13
+ """
14
+ If no local benchmark is found in `tif_files`, and `folder_dir.name`
15
+ exists in `benchmark_map`, download it into this folder using benchFIMquery.
16
+ Returns an updated list of tif files.
17
+ """
18
+ folder_dir = Path(folder_dir)
19
+
20
+ # If a benchmark/BM tif is already present, just use existing files
21
+ has_benchmark = any(benchmark_name(f) for f in tif_files)
22
+ if has_benchmark or not benchmark_map:
23
+ return tif_files
24
+
25
+ # If folder not in mapping, do nothing
26
+ folder_key = folder_dir.name
27
+ file_name = benchmark_map.get(folder_key)
28
+ if not file_name:
29
+ return tif_files
30
+
31
+ # Download benchmark FIM by filename into this folder
32
+ benchFIMquery(
33
+ file_name=file_name,
34
+ download=True,
35
+ out_dir=str(folder_dir),
36
+ )
37
+
38
+ # Return refreshed tif list
39
+ return list(folder_dir.glob("*.tif"))
fimeval/utilis.py CHANGED
@@ -1,4 +1,5 @@
1
1
  import os
2
+ import re
2
3
  import shutil
3
4
  import pyproj
4
5
  import rasterio
@@ -182,3 +183,51 @@ def MakeFIMsUniform(fim_dir, target_crs=None, target_resolution=None):
182
183
  resample_to_resolution(str(src_path), coarsest_x, coarsest_y)
183
184
  else:
184
185
  print("All rasters already have the same resolution. No resampling needed.")
186
+
187
+ # Find the best boundary file in the folder when multiple boundary files are present
188
+ def find_best_boundary(folder: Path, benchmark_path: Path):
189
+ """
190
+ Choose the best boundary file in `folder`:
191
+ - prefer .gpkg (from benchFIM downloads),
192
+ - otherwise, pick the file with the most name tokens in common with the benchmark.
193
+ """
194
+ exts = [".gpkg", ".shp", ".geojson", ".kml"]
195
+ candidates = []
196
+ for ext in exts:
197
+ candidates.extend(folder.glob(f"*{ext}"))
198
+
199
+ if not candidates:
200
+ return None
201
+ if len(candidates) == 1:
202
+ print(f"Auto-detected boundary: {candidates[0]}")
203
+ return candidates[0]
204
+
205
+ bench_tokens = set(
206
+ t for t in re.split(r"[_\-\.\s]+", benchmark_path.stem.lower()) if t
207
+ )
208
+
209
+ def score(path: Path):
210
+ name_tokens = set(
211
+ t for t in re.split(r"[_\-\.\s]+", path.stem.lower()) if t
212
+ )
213
+ common = len(bench_tokens & name_tokens)
214
+ bonus = 1 if path.suffix.lower() == ".gpkg" else 0
215
+ return (common, bonus)
216
+
217
+ best = max(candidates, key=score)
218
+ print(f"Auto-detected boundary (best match to benchmark): {best}")
219
+ return best
220
+
221
+
222
+ # Test whether the tif is a benchmark or not
223
+ def benchmark_name(f: Path) -> bool:
224
+ name = f.stem.lower()
225
+
226
+ # Explicit word
227
+ if "benchmark" in name:
228
+ return True
229
+
230
+ # Treating underscores/dashes/dots as separators and look for a 'bm' token
231
+ tokens = re.split(r"[_\-\.\s]+", name)
232
+ return "bm" in tokens
233
+
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: fimeval
3
- Version: 0.1.55
3
+ Version: 0.1.57
4
4
  Summary: A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation
5
5
  Author: Surface Dynamics Modeling Lab
6
6
  Author-email: Supath Dhital <sdhital@crimson.ua.edu>, Dipshika Devi <ddevi@ua.edu>
@@ -680,9 +680,8 @@ Requires-Dist: kaleido==0.2.1
680
680
  Requires-Dist: nbformat<6.0.0,>=5.10.4
681
681
  Requires-Dist: pyproj<4.0.0,>=3.7.0
682
682
  Requires-Dist: notebook<8.0.0,>=7.3.2
683
- Requires-Dist: boto3<2.0.0,>=1.36.16
683
+ Requires-Dist: boto3>=1.40.0
684
684
  Requires-Dist: geemap
685
- Requires-Dist: uv
686
685
  Requires-Dist: seaborn
687
686
  Provides-Extra: dev
688
687
  Requires-Dist: pytest; extra == "dev"
@@ -712,7 +711,7 @@ To address these issues, we developed Flood Inundation Mapping Prediction Evalua
712
711
 
713
712
 
714
713
 
715
- ### **Repository structure**
714
+ ### Repository structure
716
715
  <hr style="border: 1px solid black; margin: 0;">
717
716
 
718
717
  The architecture of the ```fimeval``` integrates different modules to which helps the automation of flood evaluation. All those modules codes are in source (```src``` ) folder.
@@ -744,7 +743,7 @@ The graphical representation of fimeval pipeline can be summarized as follows in
744
743
  </div>
745
744
  Figure 1: Flowchart showing the entire framework pipeline.
746
745
 
747
- ### **Framework Installation and Usage**
746
+ ### Framework Installation and Usage
748
747
  <hr style="border: 1px solid black; margin: 0;">
749
748
 
750
749
  This framework is published as a python package in PyPI (https://pypi.org/project/fimeval/).For directly using the package, the user can install this package using python package installer 'pip' and can import on their workflows:
@@ -765,7 +764,9 @@ import fimeval as fp
765
764
  ```
766
765
  **Note: The framework usage provided in detailed in [Here (docs/fimeval_usage.ipynb)](./docs/fimeval_usage.ipynb)**. It has detail documentation from installation, setup, running- until results.
767
766
 
768
- #### **Main Directory Structure**
767
+ ### Main Directory Structure
768
+ <hr style="border: 1px solid black; margin: 0;">
769
+
769
770
  The main directory contains the primary folder for storing the case studies. If there is one case study, user can directly pass the case study folder as the main directory. Each case study folder must include a Benchmark FIM (B-FIM) with a 'benchmark' word assigned within the B-FIM file and different Model Predicted FIM (M-FIM)
770
771
  in tif format.
771
772
  For mutilple case studies,the main directory could be structure in such a way that contain the seperate folders for individual case studies.For example, if a user has two case studies they should create two seperate folders as shown in the Figure below.
@@ -786,7 +787,7 @@ If user have more precise PWB, they can input their own PWB boundary as .shp and
786
787
  ```bash
787
788
  PWD_dir = Path('./path/to/PWB/vector/file')
788
789
  ```
789
- #### **Methods for Extracting Flood Extents**
790
+ #### Methods for Extracting Flood Extents
790
791
  1. **```smallest_extent```**
791
792
  The framework will first check all the raster extents (benchmark and FIMs). It will then determine the smallest among all the rasters. A shape file will then be created to mask all the rasters.
792
793
 
@@ -816,7 +817,7 @@ method_name = "AOI"
816
817
  AOI = Path('./path/to/AOI/vectorfile')
817
818
  ```
818
819
 
819
- #### **Executing the Evaluation framework**
820
+ #### Executing the Evaluation framework
820
821
  The complete description of different modules, what they are meant for, arguments taken to run that module and what will be the end results from each is described in below **Table 1**. If user import `fimeval` framework as `fp` into workflows, they can call each module mentioned in **Table 1** as `fp.Module_Name(args)`. Here arguments in italic represents the optional field, depending upon the user requirement.
821
822
 
822
823
  Table 1: Modules in `fimeval` are in order of execution.
@@ -834,8 +835,9 @@ Table 1: Modules in `fimeval` are in order of execution.
834
835
  Figure 4: Combined raw output from framework for different two method. First row (subplot a and b) and second row (subplot c and d) is contingency maps and evaluation metrics of FIM derived using `PrintContingencyMaP` and `PlotEvaluationMetrics` module. Third row (subplot e and f) is the output after processing and calculating of evaluation with BF by unsing `EvaluateWithBuildingFoorprint` module.
835
836
 
836
837
  ## Installation Instructions
838
+ <hr style="border: 1px solid black; margin: 0;">
837
839
 
838
- ### 1. Prerequisites
840
+ #### 1. Prerequisites
839
841
 
840
842
  Before installing `fimeval`, ensure the following software are installed:
841
843
 
@@ -849,13 +851,13 @@ Before installing `fimeval`, ensure the following software are installed:
849
851
 
850
852
  ---
851
853
 
852
- ### 2. Install Anaconda
854
+ #### 2. Install Anaconda
853
855
 
854
856
  If Anaconda is not installed, download and install it from the [official website](https://www.anaconda.com/products/distribution).
855
857
 
856
858
  ---
857
859
 
858
- ### 3. Set Up Virtual Environment
860
+ #### 3. Set Up Virtual Environment
859
861
 
860
862
  #### For Mac Users
861
863
 
@@ -873,24 +875,40 @@ uv pip install fimeval
873
875
  ```
874
876
 
875
877
  ### Google Colab Version
878
+ <hr style="border: 1px solid black; margin: 0;">
876
879
 
877
880
  To use fimeval in Google Colab, follow the steps below:
878
881
 
879
- ## Upload Files
882
+ #### Upload Files
880
883
  Upload all necessary input files (e.g., raster, shapefiles, model outputs) to your Google Drive.
881
- ## Open Google Colab
884
+ #### Open Google Colab
882
885
  Go to Google Colab and sign in with a valid Google account.
883
- ## Mount Google Drive
886
+ #### Mount Google Drive
884
887
  In a new Colab notebook, mount the Google Drive
885
888
  ```bash
886
889
  pip install fimeval
887
890
  ```
888
- ### **Acknowledgements**
891
+ ### Citing our work
892
+ <hr style="border: 1px solid black; margin: 0;">
893
+
894
+ - Devi, D., Dipsikha, Supath Dhital, Dinuke Munasinghe, Sagy Cohen, Anupal Baruah, Yixian Chen, Dan Tian, & Carson Pruitt (2025).
895
+ *A framework for the evaluation of flood inundation predictions over extensive benchmark databases.*
896
+ **Environmental Modelling & Software**, 106786.
897
+ https://doi.org/10.1016/j.envsoft.2025.106786
898
+
899
+ - Cohen, S., Baruah, A., Nikrou, P., Tian, D., & Liu, H. (2025).
900
+ *Toward robust evaluations of flood inundation predictions using remote sensing–derived benchmark maps.*
901
+ **Water Resources Research**, 61(8).
902
+ https://doi.org/10.1029/2024WR039574
903
+
904
+ ### Acknowledgements
905
+ <hr style="border: 1px solid black; margin: 0;">
906
+
889
907
  | | |
890
908
  | --- | --- |
891
909
  | ![alt text](https://ciroh.ua.edu/wp-content/uploads/2022/08/CIROHLogo_200x200.png) | Funding for this project was provided by the National Oceanic & Atmospheric Administration (NOAA), awarded to the Cooperative Institute for Research to Operations in Hydrology (CIROH) through the NOAA Cooperative Agreement with The University of Alabama.
892
910
 
893
- ### **For More Information**
911
+ ### For More Information
894
912
  Contact <a href="https://geography.ua.edu/people/sagy-cohen/" target="_blank">Sagy Cohen</a>
895
913
  (sagy.cohen@ua.edu)
896
914
  Dipsikha Devi, (ddevi@ua.edu)
@@ -0,0 +1,21 @@
1
+ fimeval/__init__.py,sha256=0teuordtccJzkqE0Xr9Q-ePd631vC46KOH-9Y5n5dxU,711
2
+ fimeval/setup_benchFIM.py,sha256=3HqhzlKy46Ef1-YiLo5HBKDvj68cqw6CNWSIBTn_T00,1308
3
+ fimeval/utilis.py,sha256=i9NNzZ2AN0oCbyIc1oeKc6YmKdYgT9A5Ab5wB3W2BmE,8284
4
+ fimeval/BenchFIMQuery/__init__.py,sha256=tuylN2ZBjR6ezqG6v4OmlyGcJzaVIj539x_JUKVwjpY,78
5
+ fimeval/BenchFIMQuery/access_benchfim.py,sha256=59-LVNNHWjlJjLiilJyFRF15HwRdCXnlcVuK2Xa56BQ,27701
6
+ fimeval/BenchFIMQuery/utilis.py,sha256=A0q_dQjjG5sk7JpC6lvawj6E1FKUt8LIm5JXi46qkvQ,8543
7
+ fimeval/BuildingFootprint/__init__.py,sha256=oP9YWLdo6ANzSQFxYLv7Ku_26AY5NkLNhZLK28ICMLo,109
8
+ fimeval/BuildingFootprint/evaluationwithBF.py,sha256=3v5dZk7xL9h6emf36Cxlsq5DJ1mYA5FgWpX4i97VPFg,18238
9
+ fimeval/BuildingFootprint/microsoftBF.py,sha256=oWfAr38DYJLxjyJIDgQDOYGwYoIEVL-R_h4EyiQAWlU,4226
10
+ fimeval/ContingencyMap/PWBs3.py,sha256=UFICxO58c2fA9mIffH4ooqphv3ZXe6yX8QzpRjtI6fs,1275
11
+ fimeval/ContingencyMap/__init__.py,sha256=ckps2dyg6aci3TA-3P7oTMcCAcSTz9AA6sndHtZEwdE,259
12
+ fimeval/ContingencyMap/evaluationFIM.py,sha256=-O6aiQdsPOhw5OLQ-m45v8D6LZ58WNE7EG9g27z75Us,18825
13
+ fimeval/ContingencyMap/methods.py,sha256=kbutfo9FUH-yjvnOXxwLpdErUuebMJ8NjCroNWIYCjo,3299
14
+ fimeval/ContingencyMap/metrics.py,sha256=jwOia0Nl7aU7AuGJFAcQ4fVENnp2G_5W6JSJBzo1-_4,1094
15
+ fimeval/ContingencyMap/plotevaluationmetrics.py,sha256=AbR43fnz0mbs5a7o3-ccAj-fa5RRWG4rS3xav58_M-k,4900
16
+ fimeval/ContingencyMap/printcontingency.py,sha256=-1H_Ze2TbRSER7vy7bd0HvxnziNzPPOIPOm2YhB7r4A,5422
17
+ fimeval-0.1.57.dist-info/licenses/LICENSE.txt,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
18
+ fimeval-0.1.57.dist-info/METADATA,sha256=rUf0Gul5YPLzww26KTSzyY21R83qpEoOt0IyeYzwFhA,56941
19
+ fimeval-0.1.57.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
20
+ fimeval-0.1.57.dist-info/top_level.txt,sha256=F4QW50msI8sRrX_DK3NQ-s3swQ4-2_5Ty3mfm9ZMc6k,8
21
+ fimeval-0.1.57.dist-info/RECORD,,
@@ -1,17 +0,0 @@
1
- fimeval/__init__.py,sha256=HZJKq7XEhL6TnwFkhpf8NcEQ5h7zPQ3XJh3z5gF0gQ8,603
2
- fimeval/utilis.py,sha256=KdU6pSMS8dPf1zFyS-SOUdDSJr87IQ28uSG3ZuVfq1A,6782
3
- fimeval/BuildingFootprint/__init__.py,sha256=oP9YWLdo6ANzSQFxYLv7Ku_26AY5NkLNhZLK28ICMLo,109
4
- fimeval/BuildingFootprint/evaluationwithBF.py,sha256=84s6y-S3GAuclgAEXQ4DmBAtnnZ3t0LFhIPjeSMtzPw,17224
5
- fimeval/BuildingFootprint/microsoftBF.py,sha256=73M_e_n_3lsejsnP9eX06hou0sr2-yaWdjuoUwZ8O2Y,4063
6
- fimeval/ContingencyMap/PWBs3.py,sha256=UFICxO58c2fA9mIffH4ooqphv3ZXe6yX8QzpRjtI6fs,1275
7
- fimeval/ContingencyMap/__init__.py,sha256=ckps2dyg6aci3TA-3P7oTMcCAcSTz9AA6sndHtZEwdE,259
8
- fimeval/ContingencyMap/evaluationFIM.py,sha256=MTh9W5M-g43uzgW8ci0GJ65KKCEsepbqiE8o8Ox0Pj4,17270
9
- fimeval/ContingencyMap/methods.py,sha256=kbutfo9FUH-yjvnOXxwLpdErUuebMJ8NjCroNWIYCjo,3299
10
- fimeval/ContingencyMap/metrics.py,sha256=jwOia0Nl7aU7AuGJFAcQ4fVENnp2G_5W6JSJBzo1-_4,1094
11
- fimeval/ContingencyMap/plotevaluationmetrics.py,sha256=CLw3y3XB3XhGm_X2_U3JSlPq0DYKxATNSW7lKPvMkMA,4838
12
- fimeval/ContingencyMap/printcontingency.py,sha256=-1H_Ze2TbRSER7vy7bd0HvxnziNzPPOIPOm2YhB7r4A,5422
13
- fimeval-0.1.55.dist-info/licenses/LICENSE.txt,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
14
- fimeval-0.1.55.dist-info/METADATA,sha256=A_OiFx3NhXA_t9P0mW3qYGFchKw6UlGKuOwXFv7YzYk,56113
15
- fimeval-0.1.55.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
16
- fimeval-0.1.55.dist-info/top_level.txt,sha256=F4QW50msI8sRrX_DK3NQ-s3swQ4-2_5Ty3mfm9ZMc6k,8
17
- fimeval-0.1.55.dist-info/RECORD,,