fimeval 0.1.43__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fimeval/BuildingFootprint/__init__.py +3 -0
- fimeval/BuildingFootprint/evaluationwithBF.py +399 -0
- fimeval/ContingencyMap/PWBs3.py +41 -0
- fimeval/ContingencyMap/__init__.py +6 -0
- fimeval/ContingencyMap/evaluationFIM.py +413 -0
- fimeval/ContingencyMap/methods.py +94 -0
- fimeval/ContingencyMap/metrics.py +44 -0
- fimeval/ContingencyMap/plotevaluationmetrics.py +102 -0
- fimeval/ContingencyMap/printcontingency.py +144 -0
- fimeval/__init__.py +11 -0
- fimeval/utilis.py +182 -0
- fimeval-0.1.43.dist-info/LICENSE.txt +661 -0
- fimeval-0.1.43.dist-info/METADATA +184 -0
- fimeval-0.1.43.dist-info/RECORD +15 -0
- fimeval-0.1.43.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,399 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import ee
|
|
3
|
+
import glob
|
|
4
|
+
import geopandas as gpd
|
|
5
|
+
import rasterio
|
|
6
|
+
import pandas as pd
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from plotly.subplots import make_subplots
|
|
9
|
+
import plotly.graph_objects as go
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def Changeintogpkg(input_path, output_dir, layer_name):
    """Ensure a vector file is available in GeoPackage format.

    If *input_path* already points at a ``.gpkg`` file it is returned
    unchanged; otherwise the file is read with GeoPandas and re-written as
    ``<output_dir>/<layer_name>.gpkg``, and that new path is returned.
    """
    path_str = str(input_path)
    # Already a GeoPackage — nothing to convert.
    if path_str.endswith(".gpkg"):
        return path_str
    converted_path = os.path.join(output_dir, f"{layer_name}.gpkg")
    gpd.read_file(path_str).to_file(converted_path, driver="GPKG")
    return converted_path
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
# NOTE(review): this is a byte-identical duplicate of ``Changeintogpkg``
# defined earlier in this module; being second, it silently shadows the
# first definition at import time. Behavior is unaffected because the two
# bodies are the same, but one of them should be removed.
def Changeintogpkg(input_path, output_dir, layer_name):
    """Return *input_path* if it is already a GeoPackage; otherwise convert
    it to ``<output_dir>/<layer_name>.gpkg`` and return the new path."""
    input_path = str(input_path)
    # Already a GeoPackage — return the path unchanged.
    if input_path.endswith(".gpkg"):
        return input_path
    else:
        # Round-trip through GeoPandas to convert the format.
        gdf = gpd.read_file(input_path)
        output_gpkg = os.path.join(output_dir, f"{layer_name}.gpkg")
        gdf.to_file(output_gpkg, driver="GPKG")
        return output_gpkg
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def GetFloodedBuildingCountInfo(
    building_fp_path,
    study_area_path,
    raster1_path,
    raster2_path,
    contingency_map,
    save_dir,
    basename,
):
    """Count flooded buildings on the benchmark, candidate and contingency
    rasters and report building-based CSI / FAR / POD metrics.

    Building centroids (after clipping the footprint to the study area) are
    sampled against each raster.  On the benchmark/candidate FIMs a pixel
    value of 2 marks inundation; on the contingency raster values 2/3/4 map
    to false positive / false negative / true positive.  Results are written
    to ``<save_dir>/EvaluationMetrics/BuildingCounts_<basename>.csv`` and a
    two-panel bar chart is saved under ``<save_dir>/FinalPlots``.

    Parameters
    ----------
    building_fp_path : str or Path — building footprint vector file.
    study_area_path : str or Path — study-area boundary vector file.
    raster1_path, raster2_path : benchmark/candidate FIM rasters (role is
        inferred from "benchmark"/"candidate" in the file names).
    contingency_map : contingency raster path (used when its name contains
        "contingency").
    save_dir : output root for the CSV and the plot.
    basename : identifier appended to the output file names.
    """
    output_dir = os.path.dirname(building_fp_path)

    building_fp_gpkg = Changeintogpkg(
        building_fp_path, output_dir, "building_footprint"
    )

    building_gdf = gpd.read_file(building_fp_gpkg)
    study_area_gdf = gpd.read_file(study_area_path)

    # Work in the study area's CRS so the overlay is geometrically valid.
    if building_gdf.crs != study_area_gdf.crs:
        building_gdf = building_gdf.to_crs(study_area_gdf.crs)

    clipped_buildings = gpd.overlay(building_gdf, study_area_gdf, how="intersection")
    clipped_buildings["centroid"] = clipped_buildings.geometry.centroid

    centroid_counts = {
        "Benchmark": 0,
        "Candidate": 0,
        "False Positive": 0,
        "False Negative": 0,
        "True Positive": 0,
    }

    def count_centroids_in_raster(raster_path, label):
        # Sample every building centroid; centroids falling outside the
        # raster extent are ignored.
        with rasterio.open(raster_path) as src:
            raster_data = src.read(1)

            for centroid in clipped_buildings["centroid"]:
                row, col = src.index(centroid.x, centroid.y)
                if 0 <= row < raster_data.shape[0] and 0 <= col < raster_data.shape[1]:
                    pixel_value = raster_data[row, col]
                    if label in ["Benchmark", "Candidate"]:
                        if pixel_value == 2:  # inundated cell on this FIM
                            centroid_counts[label] += 1
                    else:  # contingency raster: 2=FP, 3=FN, 4=TP
                        if pixel_value == 2:
                            centroid_counts["False Positive"] += 1
                        elif pixel_value == 3:
                            centroid_counts["False Negative"] += 1
                        elif pixel_value == 4:
                            centroid_counts["True Positive"] += 1

    if "benchmark" in str(raster1_path).lower():
        count_centroids_in_raster(raster1_path, "Benchmark")
        count_centroids_in_raster(raster2_path, "Candidate")
    elif "candidate" in str(raster2_path).lower():
        # BUG FIX: labels were previously swapped in this branch — when
        # raster2 is the candidate, raster1 must be counted as the benchmark.
        count_centroids_in_raster(raster1_path, "Benchmark")
        count_centroids_in_raster(raster2_path, "Candidate")

    if "contingency" in str(contingency_map).lower():
        count_centroids_in_raster(contingency_map, "Contingency")

    total_buildings = len(clipped_buildings)
    percentages = {
        key: (count / total_buildings) * 100 if total_buildings > 0 else 0
        for key, count in centroid_counts.items()
    }

    TP = centroid_counts["True Positive"]
    FP = centroid_counts["False Positive"]
    FN = centroid_counts["False Negative"]

    # Building-based contingency metrics (guarded against zero denominators).
    CSI = TP / (TP + FP + FN) if (TP + FP + FN) > 0 else 0
    FAR = FP / (TP + FP) if (TP + FP) > 0 else 0
    POD = TP / (TP + FN) if (TP + FN) > 0 else 0

    counts_data = {
        "Category": [
            "Candidate",
            "Benchmark",
            "False Positive",
            "False Negative",
            "True Positive",
            "CSI",
            "FAR",
            "POD",
        ],
        "Building Count": [
            centroid_counts["Candidate"],
            centroid_counts["Benchmark"],
            centroid_counts["False Positive"],
            centroid_counts["False Negative"],
            centroid_counts["True Positive"],
            f"{CSI:.3f}",
            f"{FAR:.3f}",
            f"{POD:.3f}",
        ],
    }

    counts_df = pd.DataFrame(counts_data)
    metrics_dir = os.path.join(save_dir, "EvaluationMetrics")
    # Robustness: ensure the destination folder exists before writing.
    os.makedirs(metrics_dir, exist_ok=True)
    csv_file_path = os.path.join(metrics_dir, f"BuildingCounts_{basename}.csv")
    counts_df.to_csv(csv_file_path, index=False)

    third_raster_labels = ["False Positive", "False Negative", "True Positive"]
    third_raster_counts = [
        centroid_counts["False Positive"],
        centroid_counts["False Negative"],
        centroid_counts["True Positive"],
    ]
    bar_colors = ["#ff5733", "#ffc300", "#28a745"]

    fig = make_subplots(
        rows=1,
        cols=2,
        subplot_titles=(
            "Building Counts on Different FIMs",
            "Contingency Flooded Building Counts",
        ),
    )

    fig.add_trace(
        go.Bar(
            x=["Candidate"],
            y=[centroid_counts["Candidate"]],
            text=[f"{centroid_counts['Candidate']}"],
            textposition="auto",
            marker_color="#1c83eb",
            marker_line_color="black",
            marker_line_width=1,
            name=f"Candidate ({percentages['Candidate']:.2f}%)",
        ),
        row=1,
        col=1,
    )

    fig.add_trace(
        go.Bar(
            x=["Benchmark"],
            y=[centroid_counts["Benchmark"]],
            text=[f"{centroid_counts['Benchmark']}"],
            textposition="auto",
            marker_color="#a4490e",
            marker_line_color="black",
            marker_line_width=1,
            name=f"Benchmark ({percentages['Benchmark']:.2f}%)",
        ),
        row=1,
        col=1,
    )

    # One bar per contingency category on the second subplot.
    for label, count, color in zip(third_raster_labels, third_raster_counts, bar_colors):
        fig.add_trace(
            go.Bar(
                x=[label],
                y=[count],
                text=[f"{count}"],
                textposition="auto",
                marker_color=color,
                marker_line_color="black",
                marker_line_width=1,
                name=f"{label} ({percentages[label]:.2f}%)",
            ),
            row=1,
            col=2,
        )

    fig.update_layout(
        title="Flooded Building Counts",
        xaxis_title="Inundation Surface",
        yaxis_title="Flooded Building Counts",
        width=1100,
        height=400,
        plot_bgcolor="rgba(0, 0, 0, 0)",
        paper_bgcolor="rgba(0, 0, 0, 0)",
        showlegend=True,
        font=dict(family="Arial", size=18, color="black"),
    )

    plot_dir = os.path.join(save_dir, "FinalPlots")
    os.makedirs(plot_dir, exist_ok=True)
    output_path = os.path.join(plot_dir, f"BuildingCounts_{basename}.png")
    fig.write_image(output_path, scale=500 / 96, engine="kaleido")
    print(f"Performance metrics chart is saved as PNG at {output_path}")
    fig.show()
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def process_TIFF(
    tif_files, contingency_files, building_footprint, boundary, method_path
):
    """Pair each candidate FIM with its contingency map and run the
    building-footprint evaluation against the benchmark raster.

    The benchmark is identified by "benchmark" in the file name; every other
    raster is treated as a candidate.  Candidates without a matching
    contingency map are skipped with a message.
    """
    benchmark_path = None
    candidate_paths = []

    # The benchmark announcement is only printed for multi-candidate runs,
    # mirroring the original two-branch behavior.
    announce_benchmark = len(tif_files) > 2
    if len(tif_files) >= 2:
        for tif in tif_files:
            if "benchmark" in tif.name.lower():
                benchmark_path = tif
                if announce_benchmark:
                    print(f"---Benchmark: {tif.name}---")
            else:
                candidate_paths.append(tif)

    if not (benchmark_path and candidate_paths):
        return

    for candidate in candidate_paths:
        candidate_base_name = candidate.stem.replace("_clipped", "")

        matching_contingency_map = None
        for contingency_file in contingency_files:
            if candidate_base_name in contingency_file.name:
                matching_contingency_map = contingency_file
                print(
                    f"Found matching contingency map for candidate {candidate.name}: {contingency_file.name}"
                )
                break

        if matching_contingency_map is None:
            print(
                f"No matching contingency map found for candidate {candidate.name}. Skipping..."
            )
            continue

        print(
            f"---FIM evaluation with Building Footprint starts for {candidate.name}---"
        )
        GetFloodedBuildingCountInfo(
            building_footprint,
            boundary,
            benchmark_path,
            candidate,
            matching_contingency_map,
            method_path,
            candidate_base_name,
        )
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def find_existing_footprint(out_dir):
    """Return the first ``*.gpkg`` file found in *out_dir*, or None."""
    for candidate in Path(out_dir).glob("*.gpkg"):
        return candidate
    return None
|
|
281
|
+
|
|
282
|
+
# In case the user supplied an individual shapefile for each case study
def detect_shapefile(folder):
    """Return the first vector file found in *folder*.

    Extensions are tried in preference order (.shp, .gpkg, .geojson, .kml);
    within one extension, directory listing order decides.  Returns None if
    nothing matches.
    """
    for extension in (".shp", ".gpkg", ".geojson", ".kml"):
        matches = [f for f in os.listdir(folder) if f.lower().endswith(extension)]
        if matches:
            shapefile = os.path.join(folder, matches[0])
            print(f"Auto-detected shapefile: {shapefile}")
            return shapefile
    return None
|
|
292
|
+
|
|
293
|
+
def _resolve_boundary(method_path, shapefile_dir, case_dir):
    """Pick the evaluation boundary: explicit shapefile dir, then the
    method's BoundaryforEvaluation extent, then auto-detection in the case
    folder."""
    if shapefile_dir:
        return shapefile_dir
    if os.path.exists(os.path.join(method_path, "BoundaryforEvaluation")):
        return os.path.join(
            method_path, "BoundaryforEvaluation", "FIMEvaluatedExtent.shp"
        )
    return detect_shapefile(case_dir)


def _resolve_building_footprint(
    building_footprint, method_path, shapefile_dir, boundary, country
):
    """Return the footprint to use: the one supplied, an existing download,
    or a fresh download via msfootprint."""
    if building_footprint is not None:
        return building_footprint
    # msfootprint is only needed when no footprint was supplied.
    import msfootprint as msf

    out_dir = os.path.join(method_path, "BuildingFootprint")
    os.makedirs(out_dir, exist_ok=True)
    existing = find_existing_footprint(out_dir)
    if existing:
        return existing
    boundary_dir = shapefile_dir if shapefile_dir else boundary
    msf.BuildingFootprintwithISO(country, boundary_dir, out_dir)
    return os.path.join(out_dir, "building_footprint.gpkg")


def _evaluate_case(method_path, case_dir, country, building_footprint, shapefile_dir):
    """Run the building-footprint evaluation for one case-study method folder."""
    for folder_name in os.listdir(method_path):
        if folder_name == "MaskedFIMwithBoundary":
            contingency_path = os.path.join(method_path, "ContingencyMaps")
            tif_files = list(
                Path(os.path.join(method_path, folder_name)).glob("*.tif")
            )
            contingency_files = list(Path(contingency_path).glob("*.tif"))

            boundary = _resolve_boundary(method_path, shapefile_dir, case_dir)
            building_footprintMS = _resolve_building_footprint(
                building_footprint, method_path, shapefile_dir, boundary, country
            )

            process_TIFF(
                tif_files,
                contingency_files,
                building_footprintMS,
                boundary,
                method_path,
            )


def EvaluationWithBuildingFootprint(
    main_dir,
    method_name,
    output_dir,
    country=None,
    building_footprint=None,
    shapefile_dir=None,
):
    """Evaluate FIMs with building footprints for one or many case studies.

    If *main_dir* directly contains ``*.tif`` files it is treated as a single
    case study; otherwise every sub-folder of *main_dir* that has a matching
    folder under *output_dir* is processed.

    Parameters
    ----------
    main_dir : root folder with the input FIM rasters (single- or multi-case).
    method_name : name of the evaluation-method folder under the output root.
    output_dir : root folder holding the per-case evaluation outputs.
    country : ISO country hint for the msfootprint download (optional).
    building_footprint : pre-existing footprint file; when None one is
        downloaded (or reused) per case.
    shapefile_dir : explicit boundary file overriding auto-detection.
    """
    tif_files_main = glob.glob(os.path.join(main_dir, "*.tif"))
    if tif_files_main:
        method_path = os.path.join(output_dir, os.path.basename(main_dir), method_name)
        _evaluate_case(
            method_path, main_dir, country, building_footprint, shapefile_dir
        )
    else:
        for folder in os.listdir(main_dir):
            folder_path = os.path.join(output_dir, folder)
            if os.path.isdir(folder_path):
                method_path = os.path.join(folder_path, method_name)
                # BUG FIX: the boundary auto-detection previously received the
                # bare folder *name*; it must get the full path under main_dir.
                _evaluate_case(
                    method_path,
                    os.path.join(main_dir, folder),
                    country,
                    building_footprint,
                    shapefile_dir,
                )
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
#import Libraries
|
|
2
|
+
import geopandas as gpd
|
|
3
|
+
import boto3
|
|
4
|
+
import botocore
|
|
5
|
+
import os
|
|
6
|
+
import tempfile
|
|
7
|
+
|
|
8
|
+
# Initialize an anonymous S3 client.
# Requests are unsigned (no AWS credentials required) — the bucket below is
# publicly readable. NOTE: this client is created at import time as a module
# side effect.
s3 = boto3.client(
    's3',
    config=botocore.config.Config(signature_version=botocore.UNSIGNED)
)

# Public bucket and key prefix where the permanent water body (PWB)
# shapefile components are hosted.
bucket_name = 'sdmlab'
pwb_folder = "PWB/"
|
|
16
|
+
|
|
17
|
+
def PWB_inS3(s3_client, bucket, prefix):
    """Download all components of a shapefile from S3 into a temporary directory.

    Every object under *prefix* whose extension belongs to a shapefile
    sidecar set (.shp/.shx/.dbf/.prj/.cpg) is downloaded into a fresh
    temporary directory (the caller is responsible for cleanup).

    Parameters
    ----------
    s3_client : a boto3 S3 client.
    bucket : bucket name.
    prefix : key prefix ("folder") to scan.

    Returns
    -------
    str — local path to the downloaded .shp file.

    Raises
    ------
    ValueError — if the prefix is empty or no .shp file was downloaded.
    """
    tmp_dir = tempfile.mkdtemp()

    # BUG FIX: list_objects_v2 returns at most 1000 keys per call; paginate
    # so large folders are fully downloaded instead of silently truncated.
    paginator = s3_client.get_paginator('list_objects_v2')
    found_any = False
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
        for obj in page.get('Contents', []):
            found_any = True
            file_key = obj['Key']
            file_name = os.path.basename(file_key)
            if file_name.endswith(('.shp', '.shx', '.dbf', '.prj', '.cpg')):
                local_path = os.path.join(tmp_dir, file_name)
                s3_client.download_file(bucket, file_key, local_path)

    if not found_any:
        raise ValueError("No files found in the specified S3 folder.")

    shp_files = [f for f in os.listdir(tmp_dir) if f.endswith(".shp")]
    if not shp_files:
        raise ValueError("No .shp file found after download.")

    shp_path = os.path.join(tmp_dir, shp_files[0])
    return shp_path
|
|
37
|
+
|
|
38
|
+
def get_PWB():
    """Fetch the permanent water body (PWB) layer from the public S3 bucket
    and return it as a GeoDataFrame."""
    local_shp = PWB_inS3(s3, bucket_name, pwb_folder)
    return gpd.read_file(local_shp)
|