fimeval 0.1.57__py3-none-any.whl → 0.1.58__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fimeval/__init__.py CHANGED
@@ -2,7 +2,7 @@
2
2
  from .ContingencyMap.evaluationFIM import EvaluateFIM
3
3
  from .ContingencyMap.printcontingency import PrintContingencyMap
4
4
  from .ContingencyMap.plotevaluationmetrics import PlotEvaluationMetrics
5
- from .ContingencyMap.PWBs3 import get_PWB
5
+ from .ContingencyMap.water_bodies import get_PWB, ExtractPWB
6
6
 
7
7
  # Utility modules
8
8
  from .utilis import compress_tif_lzw
@@ -10,9 +10,12 @@ from .utilis import compress_tif_lzw
10
10
  # Evaluation with Building foorprint module
11
11
  from .BuildingFootprint.evaluationwithBF import EvaluationWithBuildingFootprint
12
12
 
13
- #Access benchmark FIM module
13
+ # Access benchmark FIM module
14
14
  from .BenchFIMQuery.access_benchfim import benchFIMquery
15
15
 
16
+ # Building Footprint module
17
+ from .BuildingFootprint.arcgis_API import getBuildingFootprint
18
+
16
19
  __all__ = [
17
20
  "EvaluateFIM",
18
21
  "PrintContingencyMap",
@@ -21,4 +24,6 @@ __all__ = [
21
24
  "EvaluationWithBuildingFootprint",
22
25
  "compress_tif_lzw",
23
26
  "benchFIMquery",
27
+ "getBuildingFootprint",
28
+ "ExtractPWB",
24
29
  ]
fimeval/setup_benchFIM.py CHANGED
@@ -4,11 +4,13 @@ Basically It will do everything before going into the actual evaluation process.
4
4
  Author: Supath Dhital
5
5
  Date updated: 25 Nov, 2025
6
6
  """
7
+
7
8
  from pathlib import Path
8
9
 
9
10
  from .BenchFIMQuery.access_benchfim import benchFIMquery
10
11
  from .utilis import benchmark_name
11
12
 
13
+
12
14
  def ensure_benchmark(folder_dir, tif_files, benchmark_map):
13
15
  """
14
16
  If no local benchmark is found in `tif_files`, and `folder_dir.name`
fimeval/utilis.py CHANGED
@@ -184,8 +184,9 @@ def MakeFIMsUniform(fim_dir, target_crs=None, target_resolution=None):
184
184
  else:
185
185
  print("All rasters already have the same resolution. No resampling needed.")
186
186
 
187
- #Function to find the best boundary file in the folder if multiple boundary files are present
188
- def find_best_boundary(folder: Path, benchmark_path: Path):
187
+
188
+ # Function to find the best boundary file in the folder if multiple boundary files are present
189
+ def find_best_boundary(folder: Path, benchmark_path: Path):
189
190
  """
190
191
  Choose the best boundary file in `folder`:
191
192
  - prefer .gpkg (from benchFIM downloads),
@@ -207,11 +208,9 @@ def find_best_boundary(folder: Path, benchmark_path: Path):
207
208
  )
208
209
 
209
210
  def score(path: Path):
210
- name_tokens = set(
211
- t for t in re.split(r"[_\-\.\s]+", path.stem.lower()) if t
212
- )
211
+ name_tokens = set(t for t in re.split(r"[_\-\.\s]+", path.stem.lower()) if t)
213
212
  common = len(bench_tokens & name_tokens)
214
- bonus = 1 if path.suffix.lower() == ".gpkg" else 0
213
+ bonus = 1 if path.suffix.lower() == ".gpkg" else 0
215
214
  return (common, bonus)
216
215
 
217
216
  best = max(candidates, key=score)
@@ -219,7 +218,7 @@ def find_best_boundary(folder: Path, benchmark_path: Path):
219
218
  return best
220
219
 
221
220
 
222
- #To test whether the tif is benchmark or not
221
+ # To test whether the tif is benchmark or not
223
222
  def benchmark_name(f: Path) -> bool:
224
223
  name = f.stem.lower()
225
224
 
@@ -230,4 +229,3 @@ def benchmark_name(f: Path) -> bool:
230
229
  # Treating underscores/dashes/dots as separators and look for a 'bm' token
231
230
  tokens = re.split(r"[_\-\.\s]+", name)
232
231
  return "bm" in tokens
233
-
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: fimeval
3
- Version: 0.1.57
3
+ Version: 0.1.58
4
4
  Summary: A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation
5
5
  Author: Surface Dynamics Modeling Lab
6
6
  Author-email: Supath Dhital <sdhital@crimson.ua.edu>, Dipshika Devi <ddevi@ua.edu>
@@ -720,11 +720,15 @@ fimeval/
720
720
  ├── docs/ # Documentation (contains 'FIMserv' Tool usage sample codes)
721
721
  │ └── sampledata/ # Contains the sample data to demonstrate how this frameworks works
722
722
  │ └── fimeval_usage.ipynb #Sample code usage of the Evaluation framework
723
+ │ └── fimbench_usage.ipynb #Sample code usage of the FIMbench Query and getting benchmark dataset
723
724
  ├── Images/ # have sample images for documentation
724
725
  ├── src/
725
- │ └── fimeval/
726
+ │ └── fimeval/
727
+ │ ├──BenchFIMQuery/ #Module to interact with the extensive FIMdatabase, hosted in AWS S3
728
+ │ │ └── access_benchfim.py #Different classes to query right benchmark FIM for any given location and set of filter
729
+ │ │ └── utilis.py #Support utility
726
730
  │ ├──BuildingFootprint/ # Contains the evaluation of model predicted FIM with microsoft building footprint
727
- │ │ └── microsoftBF.py
731
+ │ │ └── arcgis_API.py #seamless integration of building footprint in evaluation through ArcGIS REST API
728
732
  │ │ └── evaluationwithBF.py
729
733
  │ └── ContingencyMap/ # Contains all the metrics calculation and contingency map generation
730
734
  │ │ ├── evaluationFIM.py # main evaluation moodule
@@ -732,14 +736,14 @@ fimeval/
732
736
  │ │ └── metrics.py # metrics calculation module
733
737
  │ │ └── plotevaluationmetrics.py # use to vizualize the different performance metrics
734
738
  │ │ └── printcontingency.py # prints the contingency map to quickly generate the Map layout
735
- │ │ └── PWBs3.py # module which helps to get permanent water bodies from s3 bucket
739
+ │ │ └── water_bodies.py # module which helps to get permanent water bodies from S3 bucket and ArcGIS REST API
736
740
  │ └── utilis.py #Includes the resampling and reprojection of FIMs
737
741
  └── tests/ # Includes test cases for different functionality
738
742
  ```
739
743
  The graphical representation of fimeval pipeline can be summarized as follows in **```Figure 1```**. Here, it will show all the steps incorporated within the ```fimeval``` during packaging and all functionality are interconnected to each other, resulting the automation of the framework.
740
744
 
741
745
  <div align="center">
742
- <img width="900" alt="image" src="./Images/flowchart.jpg">
746
+ <img width="800" alt="image" src="./Images/flowchart.jpg">
743
747
  </div>
744
748
  Figure 1: Flowchart showing the entire framework pipeline.
745
749
 
@@ -781,7 +785,7 @@ main_dir = Path('./path/to/main/dir')
781
785
  ```
782
786
 
783
787
  #### **Permanent Water Bodies (PWB)**
784
- This framework uses PWB to first to delineate the PWB in the FIM and assign into different class so that the evaluation will be more fair. For the Contiguous United States (CONUS), the PWB is already integrated within the framework however, if user have more accurate PWB or using fimeval for outside US they can initialize and use PWB within fimeval framework. Currently it is using PWB publicly hosted by ESRI: https://hub.arcgis.com/datasets/esri::usa-detailed-water-bodies/about
788
+ This framework first uses PWB to delineate permanent water bodies in the FIM and assign them to a separate class so that the evaluation is fairer. For the Contiguous United States (CONUS), the PWB is already integrated within the framework; however, if users have a more accurate PWB or are using fimeval outside the US, they can initialize and use their own PWB within the fimeval framework. Currently it uses the PWB publicly hosted by ESRI through a REST API: https://hub.arcgis.com/datasets/esri::usa-detailed-water-bodies/about
785
789
 
786
790
  If user have more precise PWB, they can input their own PWB boundary as .shp and .gpkg format and need to assign the shapefile of the PWB and define directory as,
787
791
  ```bash
@@ -826,7 +830,7 @@ Table 1: Modules in `fimeval` are in order of execution.
826
830
  | `EvaluateFIM` | It runs all the evaluation of FIM between B-FIM and M-FIMs. | `main_dir`: Main directory containing the case study folders, <br> `method_name`: How users wants to evaluate their FIM, <br> `outpur_dir`: Output directory where all the results and the intermidiate files will be saved for further calculation, <br> *`PWB_dir`*: The permanenet water bodies vectory file directory if user wants to user their own boundary, <br> *`target_crs`*: this fimeval framework needs the floodmaps to be in projected CRS so define the projected CRS in epsg code format, <br> *`target_resolution`*: sometime if the benchmark is very high resolution than candidate FIMs, it needs heavy computational time, so user can define the resolution if there FIMs are in different spatial resolution, else it will use the coarser resolution among all FIMS within that case. |The outputs includes generated files in TIFF, SHP, CSV, and PNG formats, all stored within the output folder. Users can visualize the TIFF files using any geospatial platform. The TIFF files consist of the binary Benchmark-FIM (Benchmark.tif), Model-FIM (Candidate.tif), and Agreement-FIM (Contingency.tif). The shp files contain the boundary of the generated flood extent.|
827
831
  | `PlotContingencyMap` | For better understanding, It will print the agreement maps derived in first step. | `main_dir`, `method_name`, `output_dir` : Based on the those arguments, once all the evaluation is done, it will dynamically get the corresponding contingency raster for printing.| This prints the contingency map showing different class of evaluation (TP, FP, no data, PWB etc). The outputs look like- Figure 4 first row.|
828
832
  | `PlotEvaluationMetrics` | For quick understanding of the evaluation metrics, to plot bar of evaluation scores. | `main_dir`, `method_name`, `output_dir` : Based on the those arguments, once all the evaluation is done, it will dynamically get the corresponding file for printing based on all those info.| This prints the bar plots which includes different performance metrics calculated by EvaluateFIM module. The outputs look like- Figure 4 second row.|
829
- | `EvaluationWithBuildingFootprint` | For Building Footprint Analysis, user can specify shapefile of building footprints as .shp or .gpkg format. By default it consider global Microsoft building footprint dataset. Those data are hosted in Google Earth Engine (GEE) so, It pops up to authenticate the GEE account, please allow it and it will download the data based on evaluation boundary and evaluation is done. | `main_dir`, `method_name`, `output_dir`: Those arguments are as it is, same as all other modules. <br> *`building_footprint`*: If user wants to use their own building footprint file then pass the directory here, *`country`*: It is the 3 letter based country ISO code (eg. 'USA', NEP' etc), for the building data automation using GEE based on the evaluation extent, *`shapefile_dir`*: this is the directory of user defined AOI if user is working with their own boundary and automatic Building footprint download and evaluation, *`geeprojectID`*: this is the google earth engine google cloud project ID, which helps to access the GEE data and resources to work with building footprint download and process. | It will calculate the different metrics (e.g. TP, FP, CSI, F1, Accuracy etc) based on hit and miss of building on different M-FIM and B-FIM. Those all metrics will be saved as CSV format in `output_dir` and finally using that info it prints the counts of building foorpint in each FIMs as well as scenario on the evaluation end via bar plot.|
833
+ | `EvaluationWithBuildingFootprint` | For Building Footprint Analysis, users can specify a shapefile of building footprints in .shp or .gpkg format. By default it considers the global Microsoft building footprint dataset hosted in ArcGIS Online, which is seamlessly integrated within the framework through the ArcGIS REST API. | `main_dir`, `method_name`, `output_dir`: Those arguments are the same as in all other modules. <br> *`building_footprint`*: If users want to use their own building footprint file, pass its directory here. *`shapefile_dir`*: this is the directory of the user-defined AOI if the user is working with their own boundary and automatic building footprint download and evaluation. | It will calculate the different metrics (e.g. TP, FP, CSI, F1, Accuracy etc.) based on hits and misses of buildings on the different M-FIMs and the B-FIM. All these metrics will be saved in CSV format in `output_dir`, and finally, using that info, it prints the counts of building footprints in each FIM as well as the scenario on the evaluation end via bar plot.|
830
834
 
831
835
  <p align="center">
832
836
  <img src="./Images/methodsresults_combined.jpg" width="750" />
@@ -909,7 +913,9 @@ pip install fimeval
909
913
  | ![alt text](https://ciroh.ua.edu/wp-content/uploads/2022/08/CIROHLogo_200x200.png) | Funding for this project was provided by the National Oceanic & Atmospheric Administration (NOAA), awarded to the Cooperative Institute for Research to Operations in Hydrology (CIROH) through the NOAA Cooperative Agreement with The University of Alabama.
910
914
 
911
915
  ### For More Information
916
+
912
917
  Contact <a href="https://geography.ua.edu/people/sagy-cohen/" target="_blank">Sagy Cohen</a>
913
918
  (sagy.cohen@ua.edu)
919
+ Supath Dhital, (sdhital@crimson.ua.edu)
914
920
  Dipsikha Devi, (ddevi@ua.edu)
915
- Supath Dhital, (sdhital@crimson.ua.edu)
921
+
@@ -0,0 +1,21 @@
1
+ fimeval/__init__.py,sha256=XzCEx8Pkf_nYA1NY-cuj2MWvdOij5n0pVRtQtwc0Rp4,869
2
+ fimeval/setup_benchFIM.py,sha256=lmMaXq0yn-tRlEiXIda9ywoNFbwR8X4Tiy8MVSgPKxE,1310
3
+ fimeval/utilis.py,sha256=hC7e6ZOXN1RhnpvJmSLrMJXHSLcVJcllBLnG4rGWExE,8261
4
+ fimeval/BenchFIMQuery/__init__.py,sha256=_Ng7sOKNQrcXnoD9MfdvntNcEDRUs3VLA4RmUuil6dU,79
5
+ fimeval/BenchFIMQuery/access_benchfim.py,sha256=qqXlC82SSB9r0telG6HWXI39ohJAw4wxj5p6aAaojJU,28724
6
+ fimeval/BenchFIMQuery/utilis.py,sha256=JMTDf9bOmfEFC6562CiPNb2SsIi1qd_TT34UvM9i8ys,10480
7
+ fimeval/BuildingFootprint/__init__.py,sha256=ncVUP0lT69wrUiwpq5AvM5-8gA9ohlQmMUK_wrcZJwk,178
8
+ fimeval/BuildingFootprint/arcgis_API.py,sha256=F4AuDorU6PlZrHYRtzJx7RnT4un75cz36nmQTID8ooM,6180
9
+ fimeval/BuildingFootprint/evaluationwithBF.py,sha256=AesPK5i3uPCKjcFK3N29Kq1DBbdPWczHbs0pJTPXk5o,16749
10
+ fimeval/ContingencyMap/__init__.py,sha256=8olBcQzgK2Z7OU-9IRH6dXsOyuMvmTefDze-iwBsy5I,292
11
+ fimeval/ContingencyMap/evaluationFIM.py,sha256=HVcwTRBf_O0_CKslMZ365xxWD4pVh8Syz1XlQMbbfdk,19854
12
+ fimeval/ContingencyMap/methods.py,sha256=kbutfo9FUH-yjvnOXxwLpdErUuebMJ8NjCroNWIYCjo,3299
13
+ fimeval/ContingencyMap/metrics.py,sha256=jwOia0Nl7aU7AuGJFAcQ4fVENnp2G_5W6JSJBzo1-_4,1094
14
+ fimeval/ContingencyMap/plotevaluationmetrics.py,sha256=AbR43fnz0mbs5a7o3-ccAj-fa5RRWG4rS3xav58_M-k,4900
15
+ fimeval/ContingencyMap/printcontingency.py,sha256=rTCwL2A-nuIU3ZdRoyFokwfhnUCPx3_PAeFjtMvqRJs,5471
16
+ fimeval/ContingencyMap/water_bodies.py,sha256=tPf5R-8BNfARsJsecWlX0VPH9EU16utUOAxB4eQ9XHE,6605
17
+ fimeval-0.1.58.dist-info/licenses/LICENSE.txt,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
18
+ fimeval-0.1.58.dist-info/METADATA,sha256=JGKtnyP9IrH7Qvlhg6o47zsG7jBWXJJOQ_1Y4mU8Sg4,57043
19
+ fimeval-0.1.58.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
20
+ fimeval-0.1.58.dist-info/top_level.txt,sha256=F4QW50msI8sRrX_DK3NQ-s3swQ4-2_5Ty3mfm9ZMc6k,8
21
+ fimeval-0.1.58.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: setuptools (80.10.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,134 +0,0 @@
1
- # Importing necessary libraries
2
- import geemap
3
- import ee
4
- import os
5
- from shapely.geometry import box
6
- import pandas as pd
7
- from pathlib import Path
8
- import geopandas as gpd
9
- from pyspark.sql import SparkSession
10
- from shapely.wkt import loads
11
- import shutil
12
-
13
- # Suppress the warnings
14
- import warnings
15
-
16
- warnings.filterwarnings("ignore")
17
-
18
- # Authenticate and initialize Earth Engine
19
- ee.Authenticate()
20
-
21
-
22
- # %%
23
- def split_into_tiles(boundary, tile_size=0.1):
24
- bounds = boundary.total_bounds
25
- x_min, y_min, x_max, y_max = bounds
26
- tiles = []
27
- x = x_min
28
- while x < x_max:
29
- y = y_min
30
- while y < y_max:
31
- tile = box(x, y, x + tile_size, y + tile_size)
32
- if tile.intersects(boundary.unary_union):
33
- tiles.append(tile)
34
- y += tile_size
35
- x += tile_size
36
- return gpd.GeoDataFrame(geometry=tiles, crs=boundary.crs)
37
-
38
-
39
- # Merge the final geojson files
40
- def mergeGeoJSONfiles(output_dir, merged_file):
41
- output_dir = Path(output_dir)
42
- files = list(output_dir.glob("*.geojson"))
43
- gdfs = [gpd.read_file(file) for file in files]
44
- merged_gdf = gpd.GeoDataFrame(pd.concat(gdfs, ignore_index=True), crs="EPSG:4326")
45
- merged_gdf.to_file(merged_file, driver="GPKG")
46
-
47
-
48
- # Process each batch with number of tiles
49
- def process_batch(partition, collection_name, output_dir, boundary_wkt, projectID=None):
50
- try:
51
- if projectID:
52
- ee.Initialize(project=projectID)
53
- else:
54
- ee.Initialize()
55
-
56
- except Exception:
57
- print("To initialize, please provide the earth engine project ID")
58
-
59
- # Convert WKT boundary to geometry
60
- boundary = loads(boundary_wkt)
61
- results = []
62
-
63
- for tile_wkt in partition:
64
- try:
65
- tile = loads(tile_wkt)
66
- aoi = ee.Geometry(tile.__geo_interface__)
67
- collection = ee.FeatureCollection(collection_name).filterBounds(aoi)
68
-
69
- # Download features and filter by boundary
70
- gdf = geemap.ee_to_gdf(collection)
71
- gdf = gdf[gdf.geometry.intersects(boundary)]
72
-
73
- # Save each tile as a GeoJSON file
74
- tile_id = f"tile_{hash(tile)}"
75
- output_file = Path(output_dir) / f"{tile_id}.geojson"
76
- gdf.to_file(output_file, driver="GeoJSON")
77
- results.append(f"Saved: {output_file}")
78
- except Exception as e:
79
- results.append(f"Error processing tile: {e}")
80
-
81
- return results
82
-
83
-
84
- def getBuildingFootprintSpark(
85
- countryISO, boundary_file, out_dir, tile_size, projectID=None
86
- ):
87
- spark = SparkSession.builder.appName("BuildingFootprints").getOrCreate()
88
-
89
- # Make temporary directory
90
- temp_dir = out_dir / "temp"
91
- temp_dir.mkdir(parents=True, exist_ok=True)
92
-
93
- # Load and process boundary
94
- boundary = gpd.read_file(boundary_file).to_crs("EPSG:4326")
95
- tiles = split_into_tiles(boundary, tile_size)
96
- boundary_wkt = boundary.unary_union.wkt
97
-
98
- collection_names = [f"projects/sat-io/open-datasets/VIDA_COMBINED/{countryISO}"]
99
-
100
- # Distribute processing
101
- for collection_name in collection_names:
102
- tiles_rdd = spark.sparkContext.parallelize(
103
- tiles.geometry.apply(lambda x: x.wkt).tolist(), numSlices=10
104
- )
105
- results = tiles_rdd.mapPartitions(
106
- lambda partition: process_batch(
107
- partition, collection_name, str(temp_dir), boundary_wkt, projectID
108
- )
109
- ).collect()
110
-
111
- # Merge GeoJSON files
112
- mergeGeoJSONfiles(temp_dir, out_dir / "building_footprint.gpkg")
113
-
114
- # Clean up the temp directory
115
- shutil.rmtree(temp_dir, ignore_errors=True)
116
-
117
- print(f"Building footprint data saved to {out_dir / 'building_footprint.gpkg'}")
118
-
119
-
120
- # %%
121
- # Export the building footprint
122
- def BuildingFootprintwithISO(countryISO, ROI, out_dir, geeprojectID=None):
123
- out_dir = Path(out_dir)
124
- out_dir.mkdir(parents=True, exist_ok=True)
125
- filename = out_dir / "building_footprint.gpkg"
126
-
127
- if filename.exists():
128
- os.remove(filename)
129
-
130
- getBuildingFootprintSpark(
131
- countryISO, ROI, out_dir, tile_size=0.05, projectID=geeprojectID
132
- )
133
-
134
- BuildingFootprintwithISO("USA", "/Users/supath/Downloads/S1A_9_6m_20190530T23573_910244W430506N_AOI.gpkg", "/Users/supath/Downloads/AOI", geeprojectID="supathdh")
@@ -1,42 +0,0 @@
1
- # import Libraries
2
- import geopandas as gpd
3
- import boto3
4
- import botocore
5
- import os
6
- import tempfile
7
-
8
- # Initialize an anonymous S3 client
9
- s3 = boto3.client(
10
- "s3", config=botocore.config.Config(signature_version=botocore.UNSIGNED)
11
- )
12
-
13
- bucket_name = "sdmlab"
14
- pwb_folder = "PWB/"
15
-
16
-
17
- def PWB_inS3(s3_client, bucket, prefix):
18
- """Download all components of a shapefile from S3 into a temporary directory."""
19
- tmp_dir = tempfile.mkdtemp()
20
- response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix)
21
- if "Contents" not in response:
22
- raise ValueError("No files found in the specified S3 folder.")
23
-
24
- for obj in response["Contents"]:
25
- file_key = obj["Key"]
26
- file_name = os.path.basename(file_key)
27
- if file_name.endswith((".shp", ".shx", ".dbf", ".prj", ".cpg")):
28
- local_path = os.path.join(tmp_dir, file_name)
29
- s3_client.download_file(bucket, file_key, local_path)
30
-
31
- shp_files = [f for f in os.listdir(tmp_dir) if f.endswith(".shp")]
32
- if not shp_files:
33
- raise ValueError("No .shp file found after download.")
34
-
35
- shp_path = os.path.join(tmp_dir, shp_files[0])
36
- return shp_path
37
-
38
-
39
- def get_PWB():
40
- shp_path = PWB_inS3(s3, bucket_name, pwb_folder)
41
- pwb = gpd.read_file(shp_path)
42
- return pwb
@@ -1,21 +0,0 @@
1
- fimeval/__init__.py,sha256=0teuordtccJzkqE0Xr9Q-ePd631vC46KOH-9Y5n5dxU,711
2
- fimeval/setup_benchFIM.py,sha256=3HqhzlKy46Ef1-YiLo5HBKDvj68cqw6CNWSIBTn_T00,1308
3
- fimeval/utilis.py,sha256=i9NNzZ2AN0oCbyIc1oeKc6YmKdYgT9A5Ab5wB3W2BmE,8284
4
- fimeval/BenchFIMQuery/__init__.py,sha256=tuylN2ZBjR6ezqG6v4OmlyGcJzaVIj539x_JUKVwjpY,78
5
- fimeval/BenchFIMQuery/access_benchfim.py,sha256=59-LVNNHWjlJjLiilJyFRF15HwRdCXnlcVuK2Xa56BQ,27701
6
- fimeval/BenchFIMQuery/utilis.py,sha256=A0q_dQjjG5sk7JpC6lvawj6E1FKUt8LIm5JXi46qkvQ,8543
7
- fimeval/BuildingFootprint/__init__.py,sha256=oP9YWLdo6ANzSQFxYLv7Ku_26AY5NkLNhZLK28ICMLo,109
8
- fimeval/BuildingFootprint/evaluationwithBF.py,sha256=3v5dZk7xL9h6emf36Cxlsq5DJ1mYA5FgWpX4i97VPFg,18238
9
- fimeval/BuildingFootprint/microsoftBF.py,sha256=oWfAr38DYJLxjyJIDgQDOYGwYoIEVL-R_h4EyiQAWlU,4226
10
- fimeval/ContingencyMap/PWBs3.py,sha256=UFICxO58c2fA9mIffH4ooqphv3ZXe6yX8QzpRjtI6fs,1275
11
- fimeval/ContingencyMap/__init__.py,sha256=ckps2dyg6aci3TA-3P7oTMcCAcSTz9AA6sndHtZEwdE,259
12
- fimeval/ContingencyMap/evaluationFIM.py,sha256=-O6aiQdsPOhw5OLQ-m45v8D6LZ58WNE7EG9g27z75Us,18825
13
- fimeval/ContingencyMap/methods.py,sha256=kbutfo9FUH-yjvnOXxwLpdErUuebMJ8NjCroNWIYCjo,3299
14
- fimeval/ContingencyMap/metrics.py,sha256=jwOia0Nl7aU7AuGJFAcQ4fVENnp2G_5W6JSJBzo1-_4,1094
15
- fimeval/ContingencyMap/plotevaluationmetrics.py,sha256=AbR43fnz0mbs5a7o3-ccAj-fa5RRWG4rS3xav58_M-k,4900
16
- fimeval/ContingencyMap/printcontingency.py,sha256=-1H_Ze2TbRSER7vy7bd0HvxnziNzPPOIPOm2YhB7r4A,5422
17
- fimeval-0.1.57.dist-info/licenses/LICENSE.txt,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
18
- fimeval-0.1.57.dist-info/METADATA,sha256=rUf0Gul5YPLzww26KTSzyY21R83qpEoOt0IyeYzwFhA,56941
19
- fimeval-0.1.57.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
20
- fimeval-0.1.57.dist-info/top_level.txt,sha256=F4QW50msI8sRrX_DK3NQ-s3swQ4-2_5Ty3mfm9ZMc6k,8
21
- fimeval-0.1.57.dist-info/RECORD,,