fimeval 0.1.43__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fimeval/BuildingFootprint/__init__.py +3 -0
- fimeval/BuildingFootprint/evaluationwithBF.py +399 -0
- fimeval/ContingencyMap/PWBs3.py +41 -0
- fimeval/ContingencyMap/__init__.py +6 -0
- fimeval/ContingencyMap/evaluationFIM.py +413 -0
- fimeval/ContingencyMap/methods.py +94 -0
- fimeval/ContingencyMap/metrics.py +44 -0
- fimeval/ContingencyMap/plotevaluationmetrics.py +102 -0
- fimeval/ContingencyMap/printcontingency.py +144 -0
- fimeval/__init__.py +11 -0
- fimeval/utilis.py +182 -0
- fimeval-0.1.43.dist-info/LICENSE.txt +661 -0
- fimeval-0.1.43.dist-info/METADATA +184 -0
- fimeval-0.1.43.dist-info/RECORD +15 -0
- fimeval-0.1.43.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,413 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import numpy as np
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import geopandas as gpd
|
|
5
|
+
import rasterio
|
|
6
|
+
import shutil
|
|
7
|
+
import pandas as pd
|
|
8
|
+
from rasterio.warp import reproject, Resampling
|
|
9
|
+
from rasterio.io import MemoryFile
|
|
10
|
+
from rasterio import features
|
|
11
|
+
from rasterio.mask import mask
|
|
12
|
+
|
|
13
|
+
import warnings
|
|
14
|
+
|
|
15
|
+
warnings.filterwarnings("ignore", category=rasterio.errors.ShapeSkipWarning)
|
|
16
|
+
|
|
17
|
+
from .methods import AOI, smallest_extent, convex_hull, get_smallest_raster_path
|
|
18
|
+
from .metrics import evaluationmetrics
|
|
19
|
+
from .PWBs3 import get_PWB
|
|
20
|
+
from ..utilis import MakeFIMsUniform
|
|
21
|
+
|
|
22
|
+
# Function for the evaluation of the model
def evaluateFIM(
    benchmark_path, candidate_paths, gdf, folder, method, output_dir, shapefile=None
):
    """Evaluate candidate flood-inundation maps (FIMs) against a benchmark FIM.

    Parameters
    ----------
    benchmark_path : str or Path
        Benchmark (reference) raster; any pixel value > 0 is treated as wet.
    candidate_paths : sequence of str or Path
        Candidate rasters to score against the benchmark.
    gdf : geopandas.GeoDataFrame
        Permanent water body (PWB) polygons, masked out of both maps.
    folder : str or Path
        Site folder; its basename names the output sub-directory and, for the
        "AOI" method, it is searched for a boundary shapefile.
    method : str
        Name of a boundary function defined in this module
        ("AOI", "smallest_extent" or "convex_hull").
    output_dir : str or Path
        Root directory for all written outputs.
    shapefile : str or Path, optional
        Explicit AOI shapefile (only used when ``method == "AOI"``).

    Returns
    -------
    dict
        Metric-name -> list of per-candidate values.

    Side effects: writes the evaluation boundary, clipped rasters,
    contingency maps and an ``EvaluationMetrics.csv`` under *output_dir*.
    """
    # Lists to store evaluation metrics (one entry per candidate)
    csi_values = []
    TN_values = []
    FP_values = []
    FN_values = []
    TP_values = []
    TPR_values = []
    FNR_values = []
    Acc_values = []
    Prec_values = []
    sen_values = []
    F1_values = []
    POD_values = []
    FPR_values = []
    Merged = []
    Unique = []
    FAR_values = []
    Dice_values = []

    # Resolve the boundary method by name.  Keep the original string in a
    # separate variable: the previous code rebound `method` before raising,
    # so a failed lookup always reported "Method 'None' is not defined."
    method_name = method
    method = globals().get(method_name)
    if method is None:
        raise ValueError(f"Method '{method_name}' is not defined.")

    # Save the smallest extent boundary and clipped FIMs
    save_dir = os.path.join(output_dir, os.path.basename(folder), f"{method.__name__}")
    os.makedirs(save_dir, exist_ok=True)

    # Get the smallest matched raster extent and make a boundary shapefile
    smallest_raster_path = get_smallest_raster_path(benchmark_path, *candidate_paths)

    # If method is AOI and no shapefile directory is provided, search the
    # site folder for a vector boundary file.
    if method.__name__ == "AOI":
        if shapefile is None:
            for ext in (".shp", ".gpkg", ".geojson", ".kml"):
                for file in os.listdir(folder):
                    if file.lower().endswith(ext):
                        shapefile = os.path.join(folder, file)
                        print(f"Auto-detected shapefile: {shapefile}")
                        break
                if shapefile:
                    break
            if shapefile is None:
                raise FileNotFoundError(
                    "No shapefile (.shp, .gpkg, .geojson, .kml) found in the folder and none provided. Either provide a shapefile directory or put shapefile inside folder directory."
                )

        # Run AOI with the found or provided shapefile
        bounding_geom = AOI(benchmark_path, shapefile, save_dir)

    else:
        print(f"--- {method.__name__} is processing ---")
        bounding_geom = method(smallest_raster_path, save_dir=save_dir)

    # Read and process the benchmark raster: clip it to the boundary and
    # reclassify to {0: dry, 2: wet}.
    with rasterio.open(benchmark_path) as src1:
        out_image1, out_transform1 = mask(
            src1, bounding_geom, crop=True, all_touched=True
        )
        benchmark_nodata = src1.nodata
        benchmark_crs = src1.crs
        b_profile = src1.profile
    # Guard: src.nodata may be None, in which case there is nothing to zero out
    if benchmark_nodata is not None:
        out_image1[out_image1 == benchmark_nodata] = 0
    out_image1 = np.where(out_image1 > 0, 2, 0).astype(np.float32)

    # Remove permanent water bodies from the benchmark: rasterize the PWB
    # polygons onto the clipped grid and set those pixels to dry (0).
    gdf = gdf.to_crs(benchmark_crs)
    shapes1 = [
        geom for geom in gdf.geometry if geom is not None and not geom.is_empty
    ]
    mask1 = features.geometry_mask(
        shapes1,
        transform=out_transform1,
        invert=True,
        out_shape=out_image1.shape[1:],
    )
    extract_b = np.where(mask1, out_image1, 0)
    extract_b = np.where(extract_b > 0, 1, 0)
    idx_pwb = np.where(extract_b == 1)
    out_image1[idx_pwb] = 0

    benchmark_basename = os.path.basename(benchmark_path).split(".")[0]
    clipped_dir = os.path.join(save_dir, "MaskedFIMwithBoundary")
    os.makedirs(clipped_dir, exist_ok=True)

    clipped_benchmark = os.path.join(
        clipped_dir, f"{benchmark_basename}_clipped.tif"
    )
    b_profile.update(
        {
            "height": out_image1.shape[1],
            "width": out_image1.shape[2],
            "transform": out_transform1,
        }
    )

    with rasterio.open(clipped_benchmark, "w", **b_profile) as dst:
        dst.write(np.squeeze(out_image1), 1)

    def resize_image(
        source_image,
        source_transform,
        source_crs,
        target_crs,
        target_shape,
        target_transform,
    ):
        # Nearest-neighbour resample of a (possibly reprojected) array onto
        # the benchmark grid so the two maps align pixel-for-pixel.
        target_image = np.empty(target_shape, dtype=source_image.dtype)
        reproject(
            source=source_image,
            destination=target_image,
            src_transform=source_transform,
            dst_transform=target_transform,
            src_crs=source_crs,
            dst_crs=target_crs,
            resampling=Resampling.nearest,
        )
        return target_image

    # Process each candidate file
    for candidate_path in candidate_paths:
        with rasterio.open(candidate_path) as src2:
            candidate = src2.read(1)
            candidate_nodata = src2.nodata
            candidate_meta = src2.meta.copy()
        # Guard against a missing nodata value (None) before comparing
        if candidate_nodata is not None:
            candidate[candidate == candidate_nodata] = 0
        # Candidate coding: {1: dry, 2: wet} so that benchmark + candidate
        # produces the contingency classes 1..4 (TN, FP, FN, TP).
        candidate = np.where(candidate > 0, 2, 1).astype(np.float32)
        with MemoryFile() as memfile:
            with memfile.open(**candidate_meta) as mem2:
                mem2.write(candidate, 1)
                # Reproject the candidate onto the benchmark CRS in memory
                dst_transform, width, height = (
                    rasterio.warp.calculate_default_transform(
                        mem2.crs,
                        benchmark_crs,
                        mem2.width,
                        mem2.height,
                        *mem2.bounds,
                    )
                )
                dst_meta = mem2.meta.copy()
                dst_meta.update(
                    {
                        "crs": benchmark_crs,
                        "transform": dst_transform,
                        "width": width,
                        "height": height,
                    }
                )

                with MemoryFile() as memfile_reprojected:
                    with memfile_reprojected.open(**dst_meta) as mem2_reprojected:
                        for i in range(1, mem2.count + 1):
                            reproject(
                                source=rasterio.band(mem2, i),
                                destination=rasterio.band(mem2_reprojected, i),
                                src_transform=mem2.transform,
                                src_crs=mem2.crs,
                                dst_transform=dst_transform,
                                dst_crs=benchmark_crs,
                                resampling=Resampling.nearest,
                            )
                        out_image2, out_transform2 = mask(
                            mem2_reprojected,
                            bounding_geom,
                            crop=True,
                            all_touched=True,
                        )
                        if candidate_nodata is not None:
                            out_image2 = np.where(
                                out_image2 == candidate_nodata, 0, out_image2
                            )

                        # Save the clipped candidate raster
                        candidate_basename = os.path.basename(candidate_path).split(
                            "."
                        )[0]
                        clipped_candidate = os.path.join(
                            clipped_dir, f"{candidate_basename}_clipped.tif"
                        )
                        # NOTE(review): profile dimensions come from the
                        # benchmark clip (out_image1); presumably the
                        # candidate clip has the same shape after cropping
                        # to the shared boundary — TODO confirm.
                        b_profile.update(
                            {
                                "height": out_image1.shape[1],
                                "width": out_image1.shape[2],
                                "transform": out_transform1,
                            }
                        )
                        with rasterio.open(
                            clipped_candidate, "w", **b_profile
                        ) as dst:
                            dst.write(np.squeeze(out_image2), 1)

                        # Mask permanent water bodies out of the candidate
                        # (coded -1 so they never form a valid class sum).
                        mask2 = features.geometry_mask(
                            shapes1,
                            transform=out_transform2,
                            invert=True,
                            out_shape=(out_image2.shape[1], out_image2.shape[2]),
                        )
                        extract_c = np.where(mask2, out_image2, 0)
                        extract_c = np.where(extract_c > 0, 1, 0)
                        idx_pwc = np.where(extract_c == 1)
                        out_image2[idx_pwc] = -1
                        # Align the candidate onto the benchmark grid
                        out_image2_resized = resize_image(
                            out_image2,
                            out_transform2,
                            mem2_reprojected.crs,
                            benchmark_crs,
                            out_image1.shape,
                            out_transform1,
                        )
                        merged = out_image1 + out_image2_resized

                        # Get Evaluation Metrics
                        (
                            unique_values,
                            TN,
                            FP,
                            FN,
                            TP,
                            TPR,
                            FNR,
                            Acc,
                            Prec,
                            sen,
                            CSI,
                            F1_score,
                            POD,
                            FPR,
                            merged,
                            FAR,
                            Dice,
                        ) = evaluationmetrics(out_image1, out_image2_resized)

                        # Append values to the lists
                        csi_values.append(CSI)
                        TN_values.append(TN)
                        FP_values.append(FP)
                        FN_values.append(FN)
                        TP_values.append(TP)
                        TPR_values.append(TPR)
                        FNR_values.append(FNR)
                        Acc_values.append(Acc)
                        Prec_values.append(Prec)
                        sen_values.append(sen)
                        F1_values.append(F1_score)
                        POD_values.append(POD)
                        FPR_values.append(FPR)
                        Merged.append(merged)
                        Unique.append(unique_values)
                        FAR_values.append(FAR)
                        Dice_values.append(Dice)

    results = {
        "CSI_values": csi_values,
        "TN_values": TN_values,
        "FP_values": FP_values,
        "FN_values": FN_values,
        "TP_values": TP_values,
        "TPR_values": TPR_values,
        "FNR_values": FNR_values,
        "Acc_values": Acc_values,
        "Prec_values": Prec_values,
        "sen_values": sen_values,
        "F1_values": F1_values,
        "POD_values": POD_values,
        "FPR_values": FPR_values,
        # 'Merged': Merged,
        # 'Unique': Unique
        "FAR_values": FAR_values,
        "Dice_values": Dice_values,
    }

    # Write one contingency map (TN/FP/FN/TP classes) per candidate
    for candidate_idx, candidate_path in enumerate(candidate_paths):
        candidate_BASENAME = os.path.splitext(os.path.basename(candidate_path))[0]
        merged_raster = Merged[candidate_idx]
        if merged_raster.ndim == 3:
            band = merged_raster.squeeze()
        elif merged_raster.ndim == 2:
            band = merged_raster
        else:
            raise ValueError(
                f"Unexpected number of dimensions in Merged[{candidate_idx}]."
            )

        # Construct the contingency file name dynamically
        contigency_dir = os.path.join(save_dir, "ContingencyMaps")
        os.makedirs(contigency_dir, exist_ok=True)
        output_filename = os.path.join(
            contigency_dir, f"ContingencyMAP_{candidate_BASENAME}.tif"
        )
        with rasterio.open(output_filename, "w", **b_profile) as dst:
            dst.write(band, 1)
            dst.transform = out_transform1
            dst.crs = benchmark_crs

    # Saving it into dataframe: one column per candidate, one row per metric
    candidate_names = [
        os.path.splitext(os.path.basename(path))[0] for path in candidate_paths
    ]
    df = pd.DataFrame.from_dict(results, orient="index")
    df.columns = candidate_names
    df.reset_index(inplace=True)
    df.rename(columns={"index": "Metrics"}, inplace=True)

    # Save the DataFrame
    evaluationMetrics_DIR = os.path.join(save_dir, "EvaluationMetrics")
    os.makedirs(evaluationMetrics_DIR, exist_ok=True)

    csv_file = os.path.join(evaluationMetrics_DIR, "EvaluationMetrics.csv")
    df.to_csv(csv_file, index=False)
    print(f"Evaluation metrics saved to {csv_file}")
    return results
|
+
|
|
341
|
+
|
|
342
|
+
def EvaluateFIM(main_dir, method_name, output_dir, PWB_dir=None, shapefile_dir=None, target_crs=None, target_resolution=None):
    """Run the FIM evaluation over a site directory (or a directory of sites).

    If *main_dir* directly contains ``.tif`` files it is treated as a single
    site; otherwise each sub-folder containing ``.tif`` files is evaluated
    independently.  For every site the rasters are first uniformized into a
    temporary ``processing`` folder (removed afterwards).

    Parameters
    ----------
    main_dir : str or Path
        Site directory, or parent directory of site sub-folders.
    method_name : str
        Boundary method passed to ``evaluateFIM`` ("AOI", "smallest_extent",
        "convex_hull").
    output_dir : str or Path
        Root directory for outputs.
    PWB_dir : str or Path, optional
        Permanent-water-body vector file; defaults to the hosted layer
        fetched by ``get_PWB()``.
    shapefile_dir : str or Path, optional
        Explicit AOI shapefile (only relevant for the "AOI" method).
    target_crs, target_resolution : optional
        Forwarded to ``MakeFIMsUniform``.
    """
    main_dir = Path(main_dir)
    # Read the permanent water bodies
    if PWB_dir is None:
        gdf = get_PWB()
    else:
        gdf = gpd.read_file(PWB_dir)

    def process_TIFF(tif_files, folder_dir):
        # Split the tif files into one benchmark (name contains "benchmark",
        # case-insensitive, or the case-sensitive marker "BM") and the rest
        # as candidates.  With fewer than two files nothing matches and the
        # skip message below is printed.
        benchmark_path = None
        candidate_path = []

        if len(tif_files) >= 2:
            for tif_file in tif_files:
                if "benchmark" in tif_file.name.lower() or "BM" in tif_file.name:
                    benchmark_path = tif_file
                    # The original only announced the benchmark when more
                    # than two files were present; preserved here.
                    if len(tif_files) > 2:
                        print(f"---Benchmark: {tif_file.name}---")
                else:
                    candidate_path.append(tif_file)

        if benchmark_path and candidate_path:
            print(f"---Flood Inundation Evaluation of {folder_dir.name}---")
            Metrics = evaluateFIM(
                benchmark_path,
                candidate_path,
                gdf,
                folder_dir,
                method_name,
                output_dir,
                shapefile_dir,
            )
            print("\n", Metrics, "\n")
        else:
            print(
                f"Skipping {folder_dir.name} as it doesn't have a valid benchmark and candidate configuration."
            )

    def _evaluate_site(site_dir):
        # Uniformize CRS/resolution into a temporary "processing" folder,
        # evaluate its rasters, then remove the temporary folder.
        MakeFIMsUniform(site_dir, target_crs=target_crs, target_resolution=target_resolution)
        processing_folder = site_dir / "processing"
        TIFFfiles = list(processing_folder.glob("*.tif"))
        process_TIFF(TIFFfiles, site_dir)
        shutil.rmtree(processing_folder)

    # Check if main_dir directly contains tif files
    if list(main_dir.glob("*.tif")):
        _evaluate_site(main_dir)
    else:
        # Otherwise each sub-folder is treated as a separate site
        for folder in main_dir.iterdir():
            if folder.is_dir():
                if list(folder.glob("*.tif")):
                    _evaluate_site(folder)
                else:
                    print(
                        f"Skipping {folder.name} as it doesn't contain any tif files."
                    )
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import rasterio
|
|
3
|
+
import numpy as np
|
|
4
|
+
from shapely.geometry import shape, box
|
|
5
|
+
import geopandas as gpd
|
|
6
|
+
from geopandas import GeoDataFrame
|
|
7
|
+
from shapely.geometry import shape, mapping
|
|
8
|
+
from rasterio.features import shapes
|
|
9
|
+
from shapely.ops import unary_union
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# Smallest raster extent
def get_smallest_raster_path(benchmark_path, *candidate_paths):
    """Return the path of the raster with the fewest pixels (rows * cols).

    Ties are resolved in favour of the earliest path, with *benchmark_path*
    considered first.  The original bound a local named ``shape``, shadowing
    ``shapely.geometry.shape`` imported at module level; renamed here.
    """

    def pixel_count(raster_path):
        # src.shape is (rows, cols); total pixel count is their product
        with rasterio.open(raster_path) as src:
            rows, cols = src.shape
        return rows * cols

    all_paths = [benchmark_path] + list(candidate_paths)
    # min() with a strict key keeps the first minimal element, matching the
    # original "size < smallest_size" scan.
    return min(all_paths, key=pixel_count)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# Method 1: Smallest extent
def smallest_extent(raster_path, save_dir):
    """Build the rectangular extent of *raster_path* as the evaluation boundary.

    The boundary is written to ``<save_dir>/BoundaryforEvaluation/
    FIMEvaluatedExtent.shp`` and returned as a one-element list of
    GeoJSON-like mappings, ready for ``rasterio.mask.mask``.
    """
    with rasterio.open(raster_path) as src:
        extent = src.bounds
        crs_string = src.crs.to_string()

    bounding_geom = box(extent.left, extent.bottom, extent.right, extent.top)

    # Persist the smallest-extent boundary alongside the other outputs
    boundary_dir = os.path.join(save_dir, "BoundaryforEvaluation")
    if not os.path.exists(boundary_dir):
        os.makedirs(boundary_dir)
    boundary_shapefile = os.path.join(boundary_dir, "FIMEvaluatedExtent.shp")

    boundary_gdf = gpd.GeoDataFrame({"geometry": [bounding_geom]}, crs=crs_string)
    boundary_gdf.to_file(boundary_shapefile, driver="ESRI Shapefile")

    return [mapping(bounding_geom)]
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
# Method 2: Convex Hull
def convex_hull(raster_path, save_dir):
    """Compute the convex hull of all wet (>0) pixels as the evaluation boundary.

    The hull is written to ``<save_dir>/BoundaryforEvaluation/
    FIMEvaluatedExtent.shp`` and returned as a one-element list of
    GeoJSON-like mappings for ``rasterio.mask.mask``.
    """
    with rasterio.open(raster_path) as src:
        raster_data = src.read(1)
        transform = src.transform
        crs = src.crs

    # rasterio.features.shapes only supports a few dtypes; promote others
    if raster_data.dtype not in ["int16", "int32", "uint8", "uint16", "float32"]:
        raster_data = raster_data.astype("float32")

    # Binarise: wet pixels (>0) become 1, everything else 0
    raster_data = np.where(raster_data > 0, 1, 0).astype("uint8")
    wet_mask = raster_data == 1

    # Vectorise the wet pixels into polygons
    feature_generator = shapes(raster_data, mask=wet_mask, transform=transform)
    polygons = [shape(feature[0]) for feature in feature_generator]

    # Create GeoDataFrame from polygons
    gdf = GeoDataFrame({"geometry": polygons}, crs=crs)

    # Saving the boundary
    Bound_SHP = os.path.join(save_dir, "BoundaryforEvaluation")
    os.makedirs(Bound_SHP, exist_ok=True)
    boundary_shapefile = os.path.join(Bound_SHP, "FIMEvaluatedExtent.shp")

    # BUGFIX: the original first wrote the raw wet-pixel polygons to
    # boundary_shapefile and then immediately overwrote the same file with
    # the hull below; the first write was pure wasted I/O and was removed.
    bounding_geom = unary_union(gdf.geometry).convex_hull

    bounding_gdf = gpd.GeoDataFrame({"geometry": [bounding_geom]}, crs=gdf.crs)
    bounding_gdf.to_file(boundary_shapefile, driver="ESRI Shapefile")
    return [mapping(bounding_geom)]
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
# Method 3: AOI (User defined shapefile)
def AOI(benchmark_path, shapefile_path, save_dir):
    """Return the user-supplied AOI geometries, reprojected to the benchmark CRS.

    Parameters
    ----------
    benchmark_path : str or Path
        Benchmark raster; only its CRS is used.
    shapefile_path : str or Path
        Vector file with the area(s) of interest.
    save_dir : str or Path
        Unused; kept for signature compatibility with the other methods.

    Returns
    -------
    list of shapely geometries, suitable for ``rasterio.mask.mask``.
    """
    # Only the CRS is needed from the benchmark.  The original also read the
    # full first band and cast its dtype, then never used the result — that
    # dead work has been removed.
    with rasterio.open(benchmark_path) as src:
        crs = src.crs

    bounding_geom = gpd.read_file(shapefile_path)
    bounding_geom = bounding_geom.to_crs(crs)

    return [geom for geom in bounding_geom.geometry]
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
# Get all the evaluation metrics
def evaluationmetrics(out_image1, out_image2):
    """Compute contingency counts and derived skill metrics for a FIM pair.

    Parameters
    ----------
    out_image1 : numpy.ndarray
        Benchmark array coded {0: dry, 2: wet}.
    out_image2 : numpy.ndarray
        Candidate array coded {1: dry, 2: wet} (PWB pixels set to -1).

    Summing the two arrays yields the contingency classes:
        1 = TN, 2 = FP, 3 = FN, 4 = TP
    (other sums, e.g. from PWB pixels, are ignored).

    Returns
    -------
    tuple
        (unique_values, TN, FP, FN, TP, TPR, FNR, Acc, Prec, sen, CSI,
         F1_score, POD, FPR, merged, FAR, Dice)
    """
    merged = out_image1 + out_image2
    unique_values, counts = np.unique(merged, return_counts=True)
    class_pixel_counts = dict(zip(unique_values, counts))

    # A class that does not occur simply has zero pixels; the original
    # indexed the dict directly and raised KeyError in that case.
    TN = class_pixel_counts.get(1, 0)
    FP = class_pixel_counts.get(2, 0)
    FN = class_pixel_counts.get(3, 0)
    TP = class_pixel_counts.get(4, 0)

    def _ratio(numerator, denominator):
        # Guard against empty denominators (degenerate maps): return NaN
        # instead of raising ZeroDivisionError.
        return numerator / denominator if denominator else float("nan")

    TPR = _ratio(TP, TP + FN)
    FNR = _ratio(FN, TP + FN)
    Acc = _ratio(TP + TN, TP + TN + FP + FN)
    Prec = _ratio(TP, TP + FP)
    sen = _ratio(TP, TP + FN)
    F1_score = _ratio(2 * (Prec * sen), Prec + sen)
    CSI = _ratio(TP, TP + FN + FP)
    POD = _ratio(TP, TP + FN)
    FPR = _ratio(FP, FP + TN)
    FAR = _ratio(FP, TP + FP)
    Dice = _ratio(2 * TP, 2 * TP + FP + FN)

    return (
        unique_values,
        TN,
        FP,
        FN,
        TP,
        TPR,
        FNR,
        Acc,
        Prec,
        sen,
        CSI,
        F1_score,
        POD,
        FPR,
        merged,
        FAR,
        Dice,
    )
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import glob
|
|
3
|
+
import pandas as pd
|
|
4
|
+
import plotly.express as px
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
# Function to plot individual metric scores
def PlotMetrics(csv_path, method_path):
    """Render one horizontal bar chart per candidate column of the metrics CSV.

    Each chart shows CSI, POD, Accuracy, Precision and F1 for one candidate
    and is saved as a PNG under ``<method_path>/FinalPlots`` (and displayed).
    """
    metrics_df = pd.read_csv(csv_path)

    # Keep only the headline metrics
    headline = ["CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"]
    metrics = metrics_df.loc[metrics_df["Metrics"].isin(headline)].copy()

    # Friendlier display names for the y axis
    display_names = {
        "CSI_values": "CSI",
        "POD_values": "POD",
        "Acc_values": "Accuracy",
        "Prec_values": "Precision",
        "F1_values": "F1 Score",
    }
    metrics.loc[:, "Metrics"] = metrics["Metrics"].replace(display_names)

    value_columns = metrics.select_dtypes(include="number").columns

    # Round all candidate columns up front
    for value_column in value_columns:
        metrics[value_column] = metrics[value_column].round(2)

    for value_column in value_columns:
        # Create the bar plot for this candidate
        fig = px.bar(
            metrics,
            x=value_column,
            y="Metrics",
            title="Performance Metrics",
            labels={value_column: "Score"},
            text=value_column,
            color="Metrics",
            color_discrete_sequence=px.colors.qualitative.Set2,
        )
        fig.update_traces(texttemplate="%{text:.2f}", textposition="outside")
        fig.update_layout(
            yaxis_title="Metrics",
            xaxis_title="Score",
            showlegend=False,
            plot_bgcolor="rgba(0, 0, 0, 0)",
            paper_bgcolor="rgba(0, 0, 0, 0)",
            margin=dict(l=10, r=10, t=40, b=10),
            xaxis=dict(showline=True, linewidth=2, linecolor="black"),
            yaxis=dict(showline=True, linewidth=2, linecolor="black"),
            height=350,
            width=900,
            title_font=dict(family="Arial", size=24, color="black"),
            xaxis_title_font=dict(family="Arial", size=20, color="black"),
            yaxis_title_font=dict(family="Arial", size=20, color="black"),
            font=dict(family="Arial", size=18, color="black"),
        )

        # Save each plot as a PNG, using the column name as the filename
        plot_dir = os.path.join(method_path, "FinalPlots")
        if not os.path.exists(plot_dir):
            os.makedirs(plot_dir)

        output_filename = f"EvaluationMetrics_{value_column}.png"
        output_path = os.path.join(plot_dir, output_filename)

        # Save the plot as PNG
        fig.write_image(output_path, engine="kaleido", scale=500 / 96)
        print(
            f"Performance metrics chart ({value_column}) saved as PNG at {output_path}"
        )
        fig.show()
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def PlotEvaluationMetrics(main_dir, method_name, out_dir):
    """Locate each EvaluationMetrics.csv produced by EvaluateFIM and plot it.

    Mirrors the EvaluateFIM layout: if *main_dir* directly contains ``.tif``
    files the outputs live under ``<out_dir>/<basename(main_dir)>/
    <method_name>``; otherwise one output folder per sub-site of *main_dir*.
    """

    def _plot_for(method_path):
        csv_file = os.path.join(
            method_path, "EvaluationMetrics", "EvaluationMetrics.csv"
        )
        # BUGFIX: the original tested `if not csv_files:` on the joined path
        # string, which is always truthy, so the "not found" branch could
        # never run (and a missing file crashed in pd.read_csv).  Test for
        # the file's existence instead.
        if not os.path.exists(csv_file):
            print(
                f"No EvaluationMetrics CSV files found in '{os.path.dirname(csv_file)}'."
            )
        else:
            PlotMetrics(csv_file, method_path)

    # If main directory contains the .tif files directly
    if glob.glob(os.path.join(main_dir, "*.tif")):
        _plot_for(os.path.join(out_dir, os.path.basename(main_dir), method_name))

    # Traverse all folders in main_dir if no .tif files directly in main_dir
    else:
        for folder in os.listdir(main_dir):
            folder_path = os.path.join(out_dir, folder)
            if os.path.isdir(folder_path):
                _plot_for(os.path.join(folder_path, method_name))
|