fimeval 0.1.44__tar.gz → 0.1.46__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fimeval-0.1.44 → fimeval-0.1.46}/PKG-INFO +2 -2
- {fimeval-0.1.44 → fimeval-0.1.46}/pyproject.toml +1 -1
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/BuildingFootprint/evaluationwithBF.py +26 -41
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/ContingencyMap/evaluationFIM.py +0 -4
- fimeval-0.1.46/src/fimeval/ContingencyMap/metrics.py +43 -0
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/ContingencyMap/printcontingency.py +8 -7
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/utilis.py +1 -2
- fimeval-0.1.44/src/fimeval/ContingencyMap/metrics.py +0 -44
- {fimeval-0.1.44 → fimeval-0.1.46}/LICENSE.txt +0 -0
- {fimeval-0.1.44 → fimeval-0.1.46}/README.md +0 -0
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/BuildingFootprint/__init__.py +0 -0
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/ContingencyMap/PWBs3.py +0 -0
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/ContingencyMap/__init__.py +0 -0
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/ContingencyMap/methods.py +0 -0
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/ContingencyMap/plotevaluationmetrics.py +0 -0
- {fimeval-0.1.44 → fimeval-0.1.46}/src/fimeval/__init__.py +0 -0
src/fimeval/BuildingFootprint/evaluationwithBF.py
@@ -21,18 +21,6 @@ def Changeintogpkg(input_path, output_dir, layer_name):
         gdf.to_file(output_gpkg, driver="GPKG")
         return output_gpkg
 
-
-def Changeintogpkg(input_path, output_dir, layer_name):
-    input_path = str(input_path)
-    if input_path.endswith(".gpkg"):
-        return input_path
-    else:
-        gdf = gpd.read_file(input_path)
-        output_gpkg = os.path.join(output_dir, f"{layer_name}.gpkg")
-        gdf.to_file(output_gpkg, driver="GPKG")
-        return output_gpkg
-
-
 def GetFloodedBuildingCountInfo(
     building_fp_path,
     study_area_path,
@@ -51,8 +39,17 @@ def GetFloodedBuildingCountInfo(
     building_gdf = gpd.read_file(building_fp_gpkg)
     study_area_gdf = gpd.read_file(study_area_path)
 
-
-
+    with rasterio.open(raster1_path) as src:
+        target_crs = str(src.crs)
+
+    # Reproject all GeoDataFrames to the target CRS
+    if building_gdf.crs != target_crs:
+        building_gdf = building_gdf.to_crs(target_crs)
+        print("reproject building_gdf")
+
+    if study_area_gdf.crs != target_crs:
+        study_area_gdf = study_area_gdf.to_crs(target_crs)
+        print("reproject study_area_gdf")
 
     clipped_buildings = gpd.overlay(building_gdf, study_area_gdf, how="intersection")
     clipped_buildings["centroid"] = clipped_buildings.geometry.centroid
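The block added in this hunk reads the CRS from the benchmark raster and reprojects both vector layers to it before the overlay, so the building footprints and the study area line up with the raster grid. A minimal standalone sketch of the same pattern, with placeholder paths and a hypothetical helper name (illustrative only, not the package's code):

    import geopandas as gpd
    import rasterio

    def align_vectors_to_raster(raster_path, *vector_paths):
        # Read the raster's CRS once, then reproject each vector layer to it.
        with rasterio.open(raster_path) as src:
            target_crs = str(src.crs)
        aligned = []
        for path in vector_paths:
            gdf = gpd.read_file(path)
            if gdf.crs != target_crs:
                gdf = gdf.to_crs(target_crs)
            aligned.append(gdf)
        return aligned

    # Placeholder usage:
    # buildings, study_area = align_vectors_to_raster("benchmark_bm.tif", "buildings.gpkg", "study_area.gpkg")
    # flooded = gpd.overlay(buildings, study_area, how="intersection")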
@@ -85,7 +82,7 @@ def GetFloodedBuildingCountInfo(
             elif pixel_value == 4:
                 centroid_counts["True Positive"] += 1
 
-    if "
+    if "bm" in str(raster1_path).lower():
         count_centroids_in_raster(raster1_path, "Benchmark")
         count_centroids_in_raster(raster2_path, "Candidate")
     elif "candidate" in str(raster2_path).lower():
@@ -220,46 +217,32 @@ def GetFloodedBuildingCountInfo(
     print(f"Performance metrics chart is saved as PNG at {output_path}")
     fig.show()
 
-
 def process_TIFF(
     tif_files, contingency_files, building_footprint, boundary, method_path
 ):
     benchmark_path = None
-
-
-    if len(tif_files) == 2:
-        for tif_file in tif_files:
-            if "benchmark" in tif_file.name.lower():
-                benchmark_path = tif_file
-            else:
-                candidate_path.append(tif_file)
+    candidate_paths = []
 
-
-
-    if
+    for tif_file in tif_files:
+        if "bm" in tif_file.name.lower() or "benchmark" in tif_file.name.lower():
+            if benchmark_path is None:
                 benchmark_path = tif_file
-        print(f"---Benchmark: {tif_file.name}---")
             else:
-
-
-
-        for candidate in candidate_path:
+                candidate_paths.append(tif_file)
+        else:
+            candidate_paths.append(tif_file)
 
+    if benchmark_path and candidate_paths:
+        for candidate in candidate_paths:
             matching_contingency_map = None
             candidate_base_name = candidate.stem.replace("_clipped", "")
 
             for contingency_file in contingency_files:
                 if candidate_base_name in contingency_file.name:
                     matching_contingency_map = contingency_file
-                    print(
-                        f"Found matching contingency map for candidate {candidate.name}: {contingency_file.name}"
-                    )
                     break
 
             if matching_contingency_map:
-                print(
-                    f"---FIM evaluation with Building Footprint starts for {candidate.name}---"
-                )
                 GetFloodedBuildingCountInfo(
                     building_footprint,
                     boundary,
@@ -273,8 +256,11 @@ def process_TIFF(
                 print(
                     f"No matching contingency map found for candidate {candidate.name}. Skipping..."
                 )
-
-
+    elif not benchmark_path:
+        print("Warning: No benchmark file found.")
+    elif not candidate_paths:
+        print("Warning: No candidate files found.")
+
 def find_existing_footprint(out_dir):
     gpkg_files = list(Path(out_dir).glob("*.gpkg"))
     return gpkg_files[0] if gpkg_files else None
@@ -337,7 +323,6 @@ def EvaluationWithBuildingFootprint(
         )
     else:
         building_footprintMS = EX_building_footprint
-
     process_TIFF(
         tif_files,
         contingency_files,
src/fimeval/ContingencyMap/evaluationFIM.py
@@ -40,7 +40,6 @@ def evaluateFIM(
     Merged = []
     Unique = []
     FAR_values = []
-    Dice_values = []
 
     # Dynamically call the specified method
     method = globals().get(method)
@@ -255,7 +254,6 @@ def evaluateFIM(
            FPR,
            merged,
            FAR,
-           Dice,
        ) = evaluationmetrics(out_image1, out_image2_resized)
 
        # Append values to the lists
@@ -275,7 +273,6 @@ def evaluateFIM(
        Merged.append(merged)
        Unique.append(unique_values)
        FAR_values.append(FAR)
-       Dice_values.append(Dice)
 
    results = {
        "CSI_values": csi_values,
@@ -294,7 +291,6 @@ def evaluateFIM(
        # 'Merged': Merged,
        # 'Unique': Unique
        "FAR_values": FAR_values,
-       "Dice_values": Dice_values,
    }
    for candidate_idx, candidate_path in enumerate(candidate_paths):
        candidate_BASENAME = os.path.splitext(os.path.basename(candidate_path))[0]
fimeval-0.1.46/src/fimeval/ContingencyMap/metrics.py (new file)
@@ -0,0 +1,43 @@
+import numpy as np
+
+
+# Get all the evaluation metrics
+def evaluationmetrics(out_image1, out_image2):
+    merged = out_image1 + out_image2
+    unique_values, counts = np.unique(merged, return_counts=True)
+    class_pixel_counts = dict(zip(unique_values, counts))
+    class_pixel_counts
+    TN = class_pixel_counts.get(1,0)
+    FP = class_pixel_counts.get(2,0)
+    FN = class_pixel_counts.get(3,0)
+    TP = class_pixel_counts.get(4,0)
+    epsilon = 1e-8
+    TPR = TP / (TP + FN+epsilon)
+    FNR = FN / (TP + FN+epsilon)
+    Acc = (TP + TN) / (TP + TN + FP + FN+epsilon)
+    Prec = TP / (TP + FP+epsilon)
+    sen = TP / (TP + FN+epsilon)
+    F1_score = 2 * (Prec * sen) / (Prec + sen+epsilon)
+    CSI = TP / (TP + FN + FP+epsilon)
+    POD = TP / (TP + FN+epsilon)
+    FPR = FP / (FP + TN+epsilon)
+    FAR = FP / (TP + FP+epsilon)
+
+    return (
+        unique_values,
+        TN,
+        FP,
+        FN,
+        TP,
+        TPR,
+        FNR,
+        Acc,
+        Prec,
+        sen,
+        CSI,
+        F1_score,
+        POD,
+        FPR,
+        merged,
+        FAR,
+    )
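Compared with the 0.1.44 module removed further below, this rewritten metrics file looks up the contingency classes with dict.get(..., 0) so a missing class no longer raises a KeyError, adds an epsilon guard against division by zero, and drops the Dice score from the returned tuple. A small usage sketch, assuming the import path follows the package layout listed above and that the two input arrays are coded so their sum maps to 1=TN, 2=FP, 3=FN, 4=TP:

    import numpy as np
    from fimeval.ContingencyMap.metrics import evaluationmetrics

    # Toy arrays chosen so benchmark + candidate yields 1=TN, 2=FP, 3=FN, 4=TP
    # (one plausible encoding: benchmark dry=0 / wet=2, candidate dry=1 / wet=2).
    benchmark = np.array([[0, 0, 2], [2, 2, 0]])
    candidate = np.array([[1, 2, 1], [2, 2, 1]])

    (unique_values, TN, FP, FN, TP,
     TPR, FNR, Acc, Prec, sen,
     CSI, F1_score, POD, FPR, merged, FAR) = evaluationmetrics(benchmark, candidate)

    print(f"TP={TP}, FP={FP}, FN={FN}, TN={TN}")            # TP=2, FP=1, FN=1, TN=2
    print(f"CSI={CSI:.3f}, POD={POD:.3f}, FAR={FAR:.3f}")   # CSI=0.500, POD=0.667, FAR=0.333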
src/fimeval/ContingencyMap/printcontingency.py
@@ -18,12 +18,12 @@ def getContingencyMap(raster_path, method_path):
     combined_flood = np.full_like(band1, fill_value=1, dtype=int)
 
     # Map pixel values to colors
-    combined_flood[band1 == 5] =
-    combined_flood[band1 == 0] =
-    combined_flood[band1 == 1] =
-    combined_flood[band1 == 2] =
-    combined_flood[band1 == 3] =
-    combined_flood[band1 == 4] =
+    combined_flood[band1 == 5] = 5
+    combined_flood[band1 == 0] = 0
+    combined_flood[band1 == 1] = 1
+    combined_flood[band1 == 2] = 2
+    combined_flood[band1 == 3] = 3
+    combined_flood[band1 == 4] = 4
 
     # Handle NoData explicitly, mapping it to "No Data" class (1)
     if nodata_value is not None:
@@ -42,7 +42,7 @@ def getContingencyMap(raster_path, method_path):
     ys_dd = np.array(latitudes).reshape(ys.shape)
 
     # Define the color map and normalization
-    flood_colors = ["
+    flood_colors = ["white", "grey", "green", "blue", "red", "black"]  # 6 classes
     flood_cmap = mcolors.ListedColormap(flood_colors)
     flood_norm = mcolors.BoundaryNorm(
         boundaries=np.arange(-0.5, 6.5, 1), ncolors=len(flood_colors)
@@ -60,6 +60,7 @@ def getContingencyMap(raster_path, method_path):
 
     # Create legend patches
     value_labels = {
+        0: "No data",
         1: "True negative",
         2: "False positive",
         3: "False negative",
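The color list and the added 0: "No data" legend entry pair each contingency class with a fixed color (white, grey, green, blue, red, black for classes 0 through 5). A self-contained rendering sketch with matplotlib on a synthetic array, reusing those class codes; the meaning of class 5 is an assumption here (e.g. a masked waterbody):

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.colors as mcolors
    import matplotlib.patches as mpatches

    # Synthetic contingency grid: 0 no data, 1 TN, 2 FP, 3 FN, 4 TP, 5 assumed waterbody class.
    grid = np.array([[0, 1, 1, 2],
                     [1, 4, 4, 3],
                     [5, 4, 2, 1]])

    flood_colors = ["white", "grey", "green", "blue", "red", "black"]
    flood_cmap = mcolors.ListedColormap(flood_colors)
    flood_norm = mcolors.BoundaryNorm(boundaries=np.arange(-0.5, 6.5, 1), ncolors=len(flood_colors))

    labels = {0: "No data", 1: "True negative", 2: "False positive",
              3: "False negative", 4: "True positive", 5: "Waterbody (assumed)"}

    fig, ax = plt.subplots()
    ax.imshow(grid, cmap=flood_cmap, norm=flood_norm)
    ax.legend(handles=[mpatches.Patch(color=flood_colors[v], label=text) for v, text in labels.items()],
              loc="upper right", fontsize="small")
    plt.show()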
src/fimeval/utilis.py
@@ -132,6 +132,7 @@ def MakeFIMsUniform(fim_dir, target_crs=None, target_resolution=None):
         for src_path in tif_files:
             dst_path = processing_folder / src_path.name
             reprojectFIMs(str(src_path), str(dst_path), target_crs)
+            compress_tif_lzw(dst_path)
     else:
         all_within_conus = all(is_within_conus(bounds_list[i], crs_list[i]) for i in range(len(bounds_list)))
 
@@ -148,7 +149,6 @@ def MakeFIMsUniform(fim_dir, target_crs=None, target_resolution=None):
         for src_path in tif_files:
             dst_path = processing_folder / src_path.name
             shutil.copy(src_path, dst_path)
-            compress_tif_lzw(dst_path)
 
     # Resolution check and resampling
     processed_tifs = list(processing_folder.glob('*.tif'))
@@ -170,7 +170,6 @@ def MakeFIMsUniform(fim_dir, target_crs=None, target_resolution=None):
         for src_path in processed_tifs:
             resample_to_resolution(str(src_path), target_resolution, target_resolution)
     else:
-        print("FIMs are in different resolution after projection. \n")
         coarser_x = max(res[0] for res in resolutions)
         coarser_y = max(res[1] for res in resolutions)
         print(f"Using coarser resolution: X={coarser_x}, Y={coarser_y}. Resampling all FIMS to this resolution.")
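In the utilis.py hunks above, the LZW compression step moves from the plain-copy branch to the reprojection branch, so the reprojected GeoTIFFs are the ones that get compressed. The package's compress_tif_lzw implementation is not part of this diff; a generic sketch of LZW-recompressing a GeoTIFF with rasterio, for illustration only:

    import shutil
    import rasterio

    def compress_tif_lzw_sketch(tif_path):
        # Rewrite a GeoTIFF with LZW compression by copying its bands into a new profile.
        tif_path = str(tif_path)
        with rasterio.open(tif_path) as src:
            data = src.read()
            profile = src.profile
        profile.update(compress="lzw")
        tmp_path = tif_path + ".lzw.tmp.tif"
        with rasterio.open(tmp_path, "w", **profile) as dst:
            dst.write(data)
        shutil.move(tmp_path, tif_path)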
fimeval-0.1.44/src/fimeval/ContingencyMap/metrics.py (removed)
@@ -1,44 +0,0 @@
-import numpy as np
-
-
-# Get all the evaluation metrics
-def evaluationmetrics(out_image1, out_image2):
-    merged = out_image1 + out_image2
-    unique_values, counts = np.unique(merged, return_counts=True)
-    class_pixel_counts = dict(zip(unique_values, counts))
-    class_pixel_counts
-    TN = class_pixel_counts[1]
-    FP = class_pixel_counts[2]
-    FN = class_pixel_counts[3]
-    TP = class_pixel_counts[4]
-    TPR = TP / (TP + FN)
-    FNR = FN / (TP + FN)
-    Acc = (TP + TN) / (TP + TN + FP + FN)
-    Prec = TP / (TP + FP)
-    sen = TP / (TP + FN)
-    F1_score = 2 * (Prec * sen) / (Prec + sen)
-    CSI = TP / (TP + FN + FP)
-    POD = TP / (TP + FN)
-    FPR = FP / (FP + TN)
-    FAR = FP / (TP + FP)
-    Dice = 2 * TP / (2 * TP + FP + FN)
-
-    return (
-        unique_values,
-        TN,
-        FP,
-        FN,
-        TP,
-        TPR,
-        FNR,
-        Acc,
-        Prec,
-        sen,
-        CSI,
-        F1_score,
-        POD,
-        FPR,
-        merged,
-        FAR,
-        Dice,
-    )