fimeval 0.1.54__tar.gz → 0.1.55__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fimeval-0.1.54 → fimeval-0.1.55}/PKG-INFO +2 -1
- {fimeval-0.1.54 → fimeval-0.1.55}/pyproject.toml +3 -2
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/BuildingFootprint/evaluationwithBF.py +84 -61
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/ContingencyMap/evaluationFIM.py +2 -4
- fimeval-0.1.55/src/fimeval/ContingencyMap/plotevaluationmetrics.py +135 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/utilis.py +1 -5
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval.egg-info/PKG-INFO +2 -1
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval.egg-info/requires.txt +1 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/tests/test_evaluationfim.py +3 -1
- fimeval-0.1.54/src/fimeval/ContingencyMap/plotevaluationmetrics.py +0 -102
- {fimeval-0.1.54 → fimeval-0.1.55}/LICENSE.txt +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/README.md +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/setup.cfg +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/BuildingFootprint/__init__.py +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/BuildingFootprint/microsoftBF.py +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/ContingencyMap/PWBs3.py +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/ContingencyMap/__init__.py +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/ContingencyMap/methods.py +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/ContingencyMap/metrics.py +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/ContingencyMap/printcontingency.py +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/__init__.py +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval.egg-info/SOURCES.txt +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval.egg-info/dependency_links.txt +0 -0
- {fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval.egg-info/top_level.txt +0 -0
{fimeval-0.1.54 → fimeval-0.1.55}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fimeval
-Version: 0.1.54
+Version: 0.1.55
 Summary: A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation
 Author: Surface Dynamics Modeling Lab
 Author-email: Supath Dhital <sdhital@crimson.ua.edu>, Dipshika Devi <ddevi@ua.edu>

@@ -683,6 +683,7 @@ Requires-Dist: notebook<8.0.0,>=7.3.2
 Requires-Dist: boto3<2.0.0,>=1.36.16
 Requires-Dist: geemap
 Requires-Dist: uv
+Requires-Dist: seaborn
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: black; extra == "dev"
{fimeval-0.1.54 → fimeval-0.1.55}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "fimeval"
-version = "0.1.54"
+version = "0.1.55"
 description = "A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation"
 readme = "README.md"
 requires-python = ">=3.10"

@@ -30,7 +30,8 @@ dependencies = [
     "notebook>=7.3.2,<8.0.0",
     "boto3>=1.36.16,<2.0.0",
     "geemap",
-    "uv"
+    "uv",
+    "seaborn"
 ]
 
 [project.optional-dependencies]
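Both the PKG-INFO metadata and pyproject.toml now declare seaborn as a runtime dependency of fimeval 0.1.55. A minimal post-upgrade sanity check, assuming a standard pip-managed environment; the printed versions are expectations, not pinned values:

```python
# Hypothetical sanity check after upgrading to fimeval 0.1.55.
# seaborn (and matplotlib, pulled in by seaborn) should now resolve.
from importlib.metadata import version

import seaborn  # noqa: F401  # new runtime dependency in 0.1.55
import matplotlib.pyplot as plt  # noqa: F401

print(version("fimeval"))  # expected: 0.1.55
print(version("seaborn"))
```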
{fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/BuildingFootprint/evaluationwithBF.py

@@ -6,6 +6,8 @@ import pandas as pd
 from pathlib import Path
 from plotly.subplots import make_subplots
 import plotly.graph_objects as go
+import seaborn as sns
+import matplotlib.pyplot as plt
 
 
 def Changeintogpkg(input_path, output_dir, layer_name):

@@ -19,7 +21,8 @@ def Changeintogpkg(input_path, output_dir, layer_name):
     output_gpkg = os.path.join(output_dir, f"{layer_name}.gpkg")
     gdf.to_file(output_gpkg, driver="GPKG")
     return output_gpkg
-
+
+
 def GetFloodedBuildingCountInfo(
     building_fp_path,
     study_area_path,

@@ -57,7 +60,6 @@ def GetFloodedBuildingCountInfo(
         "True Positive": 0,
     }
 
-    # Count centroids in the contingency map
     def count_centroids_in_contingency(raster_path):
         with rasterio.open(raster_path) as src:
             raster_data = src.read(1)

@@ -74,11 +76,9 @@ def GetFloodedBuildingCountInfo(
 
     count_centroids_in_contingency(contingency_map)
 
-    # Calculate Candidate and Benchmark counts from the contingency map counts
     centroid_counts["Candidate"] = centroid_counts["True Positive"] + centroid_counts["False Positive"]
     centroid_counts["Benchmark"] = centroid_counts["True Positive"] + centroid_counts["False Negative"]
 
-
     total_buildings = len(clipped_buildings)
     percentages = {
         key: (count / total_buildings) * 100 if total_buildings > 0 else 0

@@ -92,15 +92,11 @@ def GetFloodedBuildingCountInfo(
     CSI = TP / (TP + FP + FN) if (TP + FP + FN) > 0 else 0
     FAR = FP / (TP + FP) if (TP + FP) > 0 else 0
     POD = TP / (TP + FN) if (TP + FN) > 0 else 0
-
     if centroid_counts["Benchmark"] > 0:
-        BDR = (
-            (centroid_counts["Candidate"] - centroid_counts["Benchmark"])
-            / centroid_counts["Benchmark"]
-        )
+        BDR = (centroid_counts["Candidate"] - centroid_counts["Benchmark"]) / centroid_counts["Benchmark"]
     else:
-
-
+        BDR = 0
+
     counts_data = {
         "Category": [
             "Candidate",

@@ -125,13 +121,12 @@ def GetFloodedBuildingCountInfo(
             f"{BDR:.3f}",
         ],
     }
-
     counts_df = pd.DataFrame(counts_data)
-    csv_file_path = os.path.join(
-
-    )
+    csv_file_path = os.path.join(save_dir, "EvaluationMetrics", f"BuildingCounts_{basename}.csv")
+    os.makedirs(os.path.dirname(csv_file_path), exist_ok=True)
     counts_df.to_csv(csv_file_path, index=False)
 
+    # Plotly interactive visualization only
     third_raster_labels = ["False Positive", "False Negative", "True Positive"]
     third_raster_counts = [
         centroid_counts["False Positive"],

@@ -140,80 +135,108 @@ def GetFloodedBuildingCountInfo(
     ]
 
     fig = make_subplots(
-        rows=1,
-
-        subplot_titles=(
-            "Building Counts on Different FIMs",
-            "Contingency Flooded Building Counts",
-        ),
+        rows=1, cols=2,
+        subplot_titles=("Building Counts on Different FIMs", "Contingency Flooded Building Counts"),
     )
 
     fig.add_trace(
         go.Bar(
-            x=["Candidate"],
-
-
-            textposition="auto",
-            marker_color="#1c83eb",
-            marker_line_color="black",
-            marker_line_width=1,
+            x=["Candidate"], y=[centroid_counts["Candidate"]],
+            text=[f"{centroid_counts['Candidate']}"], textposition="auto",
+            marker_color="#1c83eb", marker_line_color="black", marker_line_width=1,
             name=f"Candidate ({percentages['Candidate']:.2f}%)",
         ),
-        row=1,
-        col=1,
+        row=1, col=1,
     )
-
     fig.add_trace(
         go.Bar(
-            x=["Benchmark"],
-
-
-            textposition="auto",
-            marker_color="#a4490e",
-            marker_line_color="black",
-            marker_line_width=1,
+            x=["Benchmark"], y=[centroid_counts["Benchmark"]],
+            text=[f"{centroid_counts['Benchmark']}"], textposition="auto",
+            marker_color="#a4490e", marker_line_color="black", marker_line_width=1,
             name=f"Benchmark ({percentages['Benchmark']:.2f}%)",
         ),
-        row=1,
-        col=1,
+        row=1, col=1,
     )
 
-    for i in
+    for i, label in enumerate(third_raster_labels):
         fig.add_trace(
             go.Bar(
-                x=[
-
-                text=[f"{third_raster_counts[i]}"],
-                textposition="auto",
+                x=[label], y=[third_raster_counts[i]],
+                text=[f"{third_raster_counts[i]}"], textposition="auto",
                 marker_color=["#ff5733", "#ffc300", "#28a745"][i],
-                marker_line_color="black",
-
-                name=f"{third_raster_labels[i]} ({percentages[third_raster_labels[i]]:.2f}%)",
+                marker_line_color="black", marker_line_width=1,
+                name=f"{label} ({percentages[label]:.2f}%)",
             ),
-            row=1,
-            col=2,
+            row=1, col=2,
         )
 
     fig.update_layout(
         title="Flooded Building Counts",
         xaxis_title="Inundation Surface",
         yaxis_title="Flooded Building Counts",
-        width=1100,
-
-
-        paper_bgcolor="rgba(0, 0, 0, 0)",
-        showlegend=True,
-        font=dict(family="Arial", size=18, color="black"),
+        width=1100, height=400,
+        plot_bgcolor="rgba(0,0,0,0)", paper_bgcolor="rgba(0,0,0,0)",
+        showlegend=True, font=dict(family="Arial", size=18, color="black"),
     )
+    fig.show()
+
+    # Seaborn for static PNG saving only
+    df_left = pd.DataFrame({
+        "Category": ["Candidate", "Benchmark"],
+        "Count": [centroid_counts["Candidate"], centroid_counts["Benchmark"]],
+    })
+    df_right = pd.DataFrame({
+        "Category": third_raster_labels,
+        "Count": third_raster_counts,
+    })
+
+    sns.set_theme(style="whitegrid")
+    fig_sb, axes = plt.subplots(1, 2, figsize=(8, 3), constrained_layout=True)
+
+    def style_axes(ax, title_text, xlab, show_ylabel: bool):
+        # Adding a bit of padding so bar labels don’t overlap with the title
+        ax.set_title(title_text, fontsize=16, pad=20)
+        ax.set_xlabel(xlab, fontsize=14, color="black")
+        if show_ylabel:
+            ax.set_ylabel("Flooded Building Counts", fontsize=14, color="black")
+        else:
+            ax.set_ylabel("")
+
+        # Thicker black left/bottom spines
+        for spine in ("left", "bottom"):
+            ax.spines[spine].set_linewidth(1.5)
+            ax.spines[spine].set_color("black")
+
+        sns.despine(ax=ax, right=True, top=True)
+        ax.tick_params(axis="x", labelsize=12, colors="black")
+        ax.tick_params(axis="y", labelsize=12, colors="black")
+
+    # Left panel
+    ax0 = axes[0]
+    sns.barplot(data=df_left, x="Category", y="Count", ax=ax0,
+                palette=["#1c83eb", "#a4490e"])
+    style_axes(ax0, "Building Counts on Different FIMs", "Inundation Surface", show_ylabel=True)
+    for c in ax0.containers:
+        ax0.bar_label(c, fmt="%.0f", label_type="edge", padding=3, fontsize=14, color="black")
+
+    # Right panel
+    ax1 = axes[1]
+    sns.barplot(data=df_right, x="Category", y="Count", ax=ax1,
+                palette=["#ff5733", "#ffc300", "#28a745"])
+    style_axes(ax1, "Contingency Flooded Building Counts", "Category", show_ylabel=False)
+    for c in ax1.containers:
+        ax1.bar_label(c, fmt="%.0f", label_type="edge", padding=3, fontsize=14, color="black")
 
     plot_dir = os.path.join(save_dir, "FinalPlots")
-
-    os.makedirs(plot_dir)
+    os.makedirs(plot_dir, exist_ok=True)
     output_path = os.path.join(plot_dir, f"BuildingCounts_{basename}.png")
-
-
-
+    fig_sb.savefig(output_path, dpi=400)
+    plt.close(fig_sb)
+
+    print(f"PNG were saved in : {output_path}")
+
 
+
 
 def process_TIFF(
     tif_files, contingency_files, building_footprint, boundary, method_path
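In 0.1.55 the flooded-building-count figure is shown interactively with Plotly (`fig.show()`) while the static PNG is now rendered with seaborn/matplotlib. A minimal standalone sketch of that seaborn two-panel bar pattern, using made-up counts in place of the function's real `centroid_counts` and a hypothetical output filename:

```python
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Made-up counts standing in for the centroid_counts computed by the real function.
df_left = pd.DataFrame({"Category": ["Candidate", "Benchmark"], "Count": [120, 110]})
df_right = pd.DataFrame({
    "Category": ["False Positive", "False Negative", "True Positive"],
    "Count": [25, 15, 95],
})

sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(1, 2, figsize=(8, 3), constrained_layout=True)

sns.barplot(data=df_left, x="Category", y="Count", ax=axes[0],
            palette=["#1c83eb", "#a4490e"])
sns.barplot(data=df_right, x="Category", y="Count", ax=axes[1],
            palette=["#ff5733", "#ffc300", "#28a745"])

for ax in axes:
    for container in ax.containers:
        # Counts annotated at the top edge of each bar
        ax.bar_label(container, fmt="%.0f", label_type="edge", padding=3)
    sns.despine(ax=ax, right=True, top=True)

fig.savefig("BuildingCounts_example.png", dpi=400)  # hypothetical output path
plt.close(fig)
```

Saving the PNG through matplotlib also sidesteps the kaleido engine that the 0.1.54 code relied on via `fig.write_image(..., engine="kaleido", ...)`.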
{fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/ContingencyMap/evaluationFIM.py

@@ -116,7 +116,7 @@ def evaluateFIM(
         bounding_geom = AOI(benchmark_path, shapefile, save_dir)
 
     else:
-        print(f"
+        print(f"**{method.__name__} is processing**")
         bounding_geom = method(smallest_raster_path, save_dir=save_dir)
 
     # Read and process benchmark raster

@@ -406,7 +406,6 @@ def EvaluateFIM(
     gdf = gpd.read_file(PWB_dir)
 
     # Grant the permission to the main directory
-    print(f"Fixing permissions for {main_dir}...")
     fix_permissions(main_dir)
 
     # runt the process

@@ -425,12 +424,11 @@ def EvaluateFIM(
         for tif_file in tif_files:
             if "benchmark" in tif_file.name.lower() or "BM" in tif_file.name:
                 benchmark_path = tif_file
-                print(f"---Benchmark: {tif_file.name}---")
             else:
                 candidate_path.append(tif_file)
 
         if benchmark_path and candidate_path:
-            print(f"
+            print(f"**Flood Inundation Evaluation of {folder_dir.name}**")
             Metrics = evaluateFIM(
                 benchmark_path,
                 candidate_path,
fimeval-0.1.55/src/fimeval/ContingencyMap/plotevaluationmetrics.py

@@ -0,0 +1,135 @@
+import os
+import glob
+import pandas as pd
+import plotly.express as px
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+
+# Function to plot individual metric scores
+def PlotMetrics(csv_path, method_path):
+    metrics_df = pd.read_csv(csv_path)
+
+    # Keep only the desired metrics
+    metrics = metrics_df.loc[
+        metrics_df["Metrics"].isin([
+            "CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"
+        ])
+    ].copy()
+
+    # Rename for presentation
+    metrics["Metrics"] = metrics["Metrics"].replace({
+        "CSI_values": "CSI",
+        "POD_values": "POD",
+        "Acc_values": "Accuracy",
+        "Prec_values": "Precision",
+        "F1_values": "F1 Score",
+    })
+
+    value_columns = metrics.select_dtypes(include="number").columns
+
+    # Output directory
+    plot_dir = os.path.join(method_path, "FinalPlots")
+    os.makedirs(plot_dir, exist_ok=True)
+
+    for value_column in value_columns:
+        metrics[value_column] = metrics[value_column].round(2)
+
+        # Showing with Plotly
+        fig_plotly = px.bar(
+            metrics,
+            x=value_column,
+            y="Metrics",
+            text=value_column,
+            color="Metrics",
+            orientation="h",
+            color_discrete_sequence=px.colors.qualitative.Set2,
+            title=f"Performance Metrics"
+        )
+        fig_plotly.update_traces(texttemplate="%{text:.2f}", textposition="outside")
+        fig_plotly.update_layout(
+            showlegend=False,
+            plot_bgcolor="rgba(0,0,0,0)",
+            paper_bgcolor="rgba(0,0,0,0)",
+            height=350,
+            width=900,
+            xaxis=dict(showline=True, linewidth=2, linecolor="black", title="Score"),
+            yaxis=dict(showline=True, linewidth=2, linecolor="black"),
+            title_font=dict(family="Arial", size=24, color="black"),
+            font=dict(family="Arial", size=18, color="black"),
+        )
+        fig_plotly.show()
+
+        # Save with Seaborn
+        sns.set_theme(style="whitegrid")
+        fig, ax = plt.subplots(figsize=(8, 3))
+        sns.barplot(
+            data=metrics,
+            x=value_column,
+            y="Metrics",
+            hue="Metrics",
+            palette="Set2",
+            ax=ax,
+            dodge=False,
+            legend=False
+        )
+
+        # Annotate bars
+        for container in ax.containers:
+            ax.bar_label(container, fmt='%.2f', label_type='edge', fontsize=14)
+
+        # Styling
+        ax.set_title("Performance Metrics", fontsize=16)
+        ax.set_xlabel("Score", fontsize=16, color="black")  # just bigger, not bold
+        ax.set_ylabel("Metrics", fontsize=16, color="black")
+
+        ax.set_xticks([i/10 for i in range(0, 11, 2)])
+        ax.set_xticklabels([f"{i/10:.1f}" for i in range(0, 11, 2)], fontsize=14, color="black")
+
+        # Increase y-tick label font size
+        ax.tick_params(axis="y", labelsize=12, colors="black")
+        ax.tick_params(axis="x", labelsize=14, colors="black")
+
+        # Force spines black + thicker
+        ax.spines['left'].set_linewidth(1.5)
+        ax.spines['bottom'].set_linewidth(1.5)
+        ax.spines['left'].set_color("black")
+        ax.spines['bottom'].set_color("black")
+
+        sns.despine(right=True, top=True)
+
+        # Save to file
+        save_path = os.path.join(plot_dir, f"EvaluationMetrics_{value_column}.png")
+        plt.tight_layout()
+        fig.savefig(save_path, dpi=400)
+        plt.close(fig)
+        print(f"PNG saved at: {save_path}")
+
+
+def PlotEvaluationMetrics(main_dir, method_name, out_dir):
+
+    # If main directory contains the .tif files directly
+    tif_files_main = glob.glob(os.path.join(main_dir, "*.tif"))
+    if tif_files_main:
+        method_path = os.path.join(out_dir, os.path.basename(main_dir), method_name)
+        Evaluation_Metrics = os.path.join(method_path, "EvaluationMetrics")
+        csv_files = os.path.join(Evaluation_Metrics, "EvaluationMetrics.csv")
+        if not csv_files:
+            print(f"No EvaluationMetrics CSV files found in '{Evaluation_Metrics}'.")
+        else:
+            PlotMetrics(csv_files, method_path)
+
+    # Traverse all folders in main_dir if no .tif files directly in main_dir
+    else:
+        for folder in os.listdir(main_dir):
+            folder_path = os.path.join(out_dir, folder)
+            if os.path.isdir(folder_path):
+                method_path = os.path.join(folder_path, method_name)
+                Evaluation_Metrics = os.path.join(method_path, "EvaluationMetrics")
+                csv_files = os.path.join(Evaluation_Metrics, "EvaluationMetrics.csv")
+                if not csv_files:
+                    print(
+                        f"No EvaluationMetrics CSV files found in '{Evaluation_Metrics}'."
+                    )
+                else:
+                    PlotMetrics(csv_files, method_path)
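The new module keeps the interactive Plotly chart but saves the static PNG with seaborn; its entry point is `PlotEvaluationMetrics(main_dir, method_name, out_dir)`, which looks for `EvaluationMetrics/EvaluationMetrics.csv` under each case's method folder and passes it to `PlotMetrics`. A hedged usage sketch; the directories and method name below are placeholders, and the import assumes the module path shown in this diff:

```python
# Hypothetical call: directory layout and method name are placeholders.
from fimeval.ContingencyMap.plotevaluationmetrics import PlotEvaluationMetrics

main_dir = "./path/to/cases"      # folder with .tif FIMs, or one subfolder per case
method_name = "MyBoundaryMethod"  # placeholder; matches the per-method output folder
out_dir = "./path/to/output"      # root where the evaluation results were written

# Shows the Plotly figure for each metric column and saves
# FinalPlots/EvaluationMetrics_<column>.png next to the CSV.
PlotEvaluationMetrics(main_dir, method_name, out_dir)
```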
{fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval/utilis.py

@@ -71,7 +71,6 @@ def reprojectFIMs(src_path, dst_path, target_crs):
                 resampling=Resampling.nearest,
             )
     else:
-        print(f"Source raster is already in {target_crs}. No reprojection needed.")
         shutil.copy(src_path, dst_path)
     compress_tif_lzw(dst_path)
 

@@ -79,9 +78,7 @@ def reprojectFIMs(src_path, dst_path, target_crs):
 # Resample into the coarser resoution amoung all FIMS within the case
 def resample_to_resolution(src_path, x_resolution, y_resolution):
     src_path = Path(src_path)
-    print(src_path)
     temp_path = src_path.with_name(src_path.stem + "_resampled.tif")
-    print(temp_path)
 
     with rasterio.open(src_path) as src:
         transform = rasterio.transform.from_origin(

@@ -144,14 +141,13 @@ def MakeFIMsUniform(fim_dir, target_crs=None, target_resolution=None):
     if not final_crs:
         if all(is_within_conus(b, c) for b, c in zip(bounds_list, crs_list)):
             final_crs = "EPSG:5070"
-            print(f"Defaulting to CONUS CRS: {final_crs}")
+            print(f"Defaulting to CONUS CRS: {final_crs}, Reprojecting.")
         else:
             print(
                 "Mixed or non-CONUS CRS detected. Please provide a valid target CRS."
             )
             return
 
-    print(f"Reprojecting all rasters to {final_crs}")
     for src_path in tif_files:
         dst_path = processing_folder / src_path.name
         reprojectFIMs(str(src_path), str(dst_path), final_crs)
{fimeval-0.1.54 → fimeval-0.1.55}/src/fimeval.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fimeval
-Version: 0.1.54
+Version: 0.1.55
 Summary: A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation
 Author: Surface Dynamics Modeling Lab
 Author-email: Supath Dhital <sdhital@crimson.ua.edu>, Dipshika Devi <ddevi@ua.edu>

@@ -683,6 +683,7 @@ Requires-Dist: notebook<8.0.0,>=7.3.2
 Requires-Dist: boto3<2.0.0,>=1.36.16
 Requires-Dist: geemap
 Requires-Dist: uv
+Requires-Dist: seaborn
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: black; extra == "dev"
{fimeval-0.1.54 → fimeval-0.1.55}/tests/test_evaluationfim.py

@@ -3,10 +3,12 @@ from pathlib import Path
 
 Main_dir = (
     # "../docs/sampledata"
+    "/Users/supath/Downloads/MSResearch/FIMpef/fimpef/docs/sampledata"
 )
 PWD_dir = "./path/to/PWB"
 output_dir = (
     # "./path/to/output" # This is the output directory where the results will be saved
+    "/Users/supath/Downloads/MSResearch/FIMpef/fimpef/docs/output"
 )
 target_crs = "EPSG:5070"  # Target CRS for reprojecting the FIMs, need to be in EPSG code of Projected CRS
 target_resolution = 10  # This will be in meters, if it passes the FIMS will be resampled to this resolution else, it will find the coarser resolution among all FIMS for this case and use that to resample!

@@ -29,7 +31,7 @@ countryISO = "USA"
 def test_evaluation_framework():
     # Run the evaluation
     # It has the Permanent Water Bodies (PWB) dataset as default for United States
-    fe.EvaluateFIM(Main_dir, method_name, output_dir)
+    # fe.EvaluateFIM(Main_dir, method_name, output_dir)
 
     # OR, If the Evaluation Study Area is outside the US or, user has their own PWB dataset
     # fe.EvaluateFIM(Main_dir, method_name, output_dir)
fimeval-0.1.54/src/fimeval/ContingencyMap/plotevaluationmetrics.py

@@ -1,102 +0,0 @@
-import os
-import glob
-import pandas as pd
-import plotly.express as px
-
-
-# Function to plot individual metric scores
-def PlotMetrics(csv_path, method_path):
-    metrics_df = pd.read_csv(csv_path)
-    # Extract relevant metrics
-    metrics = metrics_df.loc[
-        metrics_df["Metrics"].isin(
-            ["CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"]
-        )
-    ].copy()
-
-    metrics.loc[:, "Metrics"] = metrics["Metrics"].replace(
-        {
-            "CSI_values": "CSI",
-            "POD_values": "POD",
-            "Acc_values": "Accuracy",
-            "Prec_values": "Precision",
-            "F1_values": "F1 Score",
-        }
-    )
-    value_columns = metrics.select_dtypes(include="number").columns
-
-    for value_column in value_columns:
-        metrics[value_column] = metrics[value_column].round(2)
-
-        # Create the bar plot
-        fig = px.bar(
-            metrics,
-            x=value_column,
-            y="Metrics",
-            title=f"Performance Metrics",
-            labels={value_column: "Score"},
-            text=value_column,
-            color="Metrics",
-            color_discrete_sequence=px.colors.qualitative.Set2,
-        )
-        fig.update_traces(texttemplate="%{text:.2f}", textposition="outside")
-        fig.update_layout(
-            yaxis_title="Metrics",
-            xaxis_title="Score",
-            showlegend=False,
-            plot_bgcolor="rgba(0, 0, 0, 0)",
-            paper_bgcolor="rgba(0, 0, 0, 0)",
-            margin=dict(l=10, r=10, t=40, b=10),
-            xaxis=dict(showline=True, linewidth=2, linecolor="black"),
-            yaxis=dict(showline=True, linewidth=2, linecolor="black"),
-            height=350,
-            width=900,
-            title_font=dict(family="Arial", size=24, color="black"),
-            xaxis_title_font=dict(family="Arial", size=20, color="black"),
-            yaxis_title_font=dict(family="Arial", size=20, color="black"),
-            font=dict(family="Arial", size=18, color="black"),
-        )
-
-        # Save each plot as a PNG, using the column name as the filename
-        plot_dir = os.path.join(method_path, "FinalPlots")
-        if not os.path.exists(plot_dir):
-            os.makedirs(plot_dir)
-
-        output_filename = f"EvaluationMetrics_{value_column}.png"
-        output_path = os.path.join(plot_dir, output_filename)
-
-        # Save the plot as PNG
-        fig.write_image(output_path, engine="kaleido", scale=500 / 96)
-        print(
-            f"Performance metrics chart ({value_column}) saved as PNG at {output_path}"
-        )
-        fig.show()
-
-
-def PlotEvaluationMetrics(main_dir, method_name, out_dir):
-
-    # If main directory contains the .tif files directly
-    tif_files_main = glob.glob(os.path.join(main_dir, "*.tif"))
-    if tif_files_main:
-        method_path = os.path.join(out_dir, os.path.basename(main_dir), method_name)
-        Evaluation_Metrics = os.path.join(method_path, "EvaluationMetrics")
-        csv_files = os.path.join(Evaluation_Metrics, "EvaluationMetrics.csv")
-        if not csv_files:
-            print(f"No EvaluationMetrics CSV files found in '{Evaluation_Metrics}'.")
-        else:
-            PlotMetrics(csv_files, method_path)
-
-    # Traverse all folders in main_dir if no .tif files directly in main_dir
-    else:
-        for folder in os.listdir(main_dir):
-            folder_path = os.path.join(out_dir, folder)
-            if os.path.isdir(folder_path):
-                method_path = os.path.join(folder_path, method_name)
-                Evaluation_Metrics = os.path.join(method_path, "EvaluationMetrics")
-                csv_files = os.path.join(Evaluation_Metrics, "EvaluationMetrics.csv")
-                if not csv_files:
-                    print(
-                        f"No EvaluationMetrics CSV files found in '{Evaluation_Metrics}'."
-                    )
-                else:
-                    PlotMetrics(csv_files, method_path)