fimeval 0.1.55__py3-none-any.whl → 0.1.57__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fimeval/BenchFIMQuery/__init__.py +5 -0
- fimeval/BenchFIMQuery/access_benchfim.py +761 -0
- fimeval/BenchFIMQuery/utilis.py +269 -0
- fimeval/BuildingFootprint/evaluationwithBF.py +107 -52
- fimeval/BuildingFootprint/microsoftBF.py +2 -0
- fimeval/ContingencyMap/evaluationFIM.py +92 -51
- fimeval/ContingencyMap/plotevaluationmetrics.py +25 -21
- fimeval/__init__.py +4 -0
- fimeval/setup_benchFIM.py +39 -0
- fimeval/utilis.py +49 -0
- {fimeval-0.1.55.dist-info → fimeval-0.1.57.dist-info}/METADATA +34 -16
- fimeval-0.1.57.dist-info/RECORD +21 -0
- fimeval-0.1.55.dist-info/RECORD +0 -17
- {fimeval-0.1.55.dist-info → fimeval-0.1.57.dist-info}/WHEEL +0 -0
- {fimeval-0.1.55.dist-info → fimeval-0.1.57.dist-info}/licenses/LICENSE.txt +0 -0
- {fimeval-0.1.55.dist-info → fimeval-0.1.57.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,269 @@
+"""
+This utility function contains how to retrieve all the necessary metadata of benchmark FIM
+from the s3 bucket during benchmark FIM querying.
+
+Authors: Supath Dhital, sdhital@crimson.ua.edu
+Updated date: 25 Nov, 2025
+"""
+
+from __future__ import annotations
+import os, re, json, datetime as dt
+from typing import List, Dict, Any, Optional
+
+import urllib.parse
+import boto3
+from botocore import UNSIGNED
+from botocore.config import Config
+
+# constants
+BUCKET = "sdmlab"
+CATALOG_KEY = (
+    "FIM_Database/FIM_Viz/catalog_core.json"  # Path of the json file in the s3 bucket
+)
+
+# s3 client
+_S3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))
+
+
+# helpers for direct S3 file links
+def s3_http_url(bucket: str, key: str) -> str:
+    """Build a public-style S3 HTTPS URL."""
+    return f"https://{bucket}.s3.amazonaws.com/{urllib.parse.quote(key, safe='/')}"
+
+
+# utils
+_YMD_RE = re.compile(r"^\d{4}-\d{2}-\d{2}$")
+_YMD_COMPACT_RE = re.compile(r"^\d{8}$")
+_YMDH_RE = re.compile(r"^\d{4}-\d{2}-\d{2}[ T]\d{2}$")
+_YMDHMS_RE = re.compile(r"^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}(:\d{2})?$")
+
+
+def _normalize_user_dt(s: str) -> str:
+    s = s.strip()
+    s = s.replace("/", "-")
+    s = re.sub(r"\s+", " ", s)
+    return s
+
+
+def _to_date(s: str) -> dt.date:
+    s = _normalize_user_dt(s)
+    if _YMD_COMPACT_RE.match(s):
+        return dt.datetime.strptime(s, "%Y%m%d").date()
+    if _YMD_RE.match(s):
+        return dt.date.fromisoformat(s)
+    try:
+        return dt.datetime.fromisoformat(s).date()
+    except Exception:
+        m = re.match(r"^(\d{4}-\d{2}-\d{2})[ T](\d{2})$", s)
+        if m:
+            return dt.datetime.fromisoformat(f"{m.group(1)} {m.group(2)}:00:00").date()
+        raise ValueError(f"Bad date format: {s}")
+
+
+def _to_hour_or_none(s: str) -> Optional[int]:
+    s = _normalize_user_dt(s)
+    if _YMD_RE.match(s) or _YMD_COMPACT_RE.match(s):
+        return None
+    m = re.match(r"^\d{4}-\d{2}-\d{2}[ T](\d{2})$", s)
+    if m:
+        return int(m.group(1))
+    try:
+        dt_obj = dt.datetime.fromisoformat(s)
+        return dt_obj.hour
+    except Exception:
+        m2 = re.match(r"^\d{4}-\d{2}-\d{2}T(\d{2})$", s)
+        if m2:
+            return int(m2.group(1))
+        return None
+
+
+def _record_day(rec: Dict[str, Any]) -> Optional[dt.date]:
+    ymd = rec.get("date_ymd")
+    if isinstance(ymd, str):
+        try:
+            return dt.date.fromisoformat(ymd)
+        except Exception:
+            pass
+    raw = rec.get("date_of_flood")
+    if isinstance(raw, str) and len(raw) >= 8:
+        try:
+            return dt.datetime.strptime(raw[:8], "%Y%m%d").date()
+        except Exception:
+            return None
+    return None
+
+
+def _record_hour_or_none(rec: Dict[str, Any]) -> Optional[int]:
+    raw = rec.get("date_of_flood")
+    if isinstance(raw, str) and "T" in raw and len(raw) >= 11:
+        try:
+            return int(raw.split("T", 1)[1][:2])
+        except Exception:
+            return None
+    return None
+
+
+# Printing helpers
+def _pretty_date_for_print(rec: Dict[str, Any]) -> str:
+    raw = rec.get("date_of_flood")
+    if isinstance(raw, str) and "T" in raw and len(raw) >= 11:
+        return f"{raw[:4]}-{raw[4:6]}-{raw[6:8]}T{raw.split('T',1)[1][:2]}"
+    ymd = rec.get("date_ymd")
+    if isinstance(ymd, str) and _YMD_RE.match(ymd):
+        return ymd
+    if isinstance(raw, str) and len(raw) >= 8:
+        return f"{raw[:4]}-{raw[4:6]}-{raw[6:8]}"
+    return "unknown"
+
+
+def _context_str(
+    huc8: Optional[str] = None,
+    date_input: Optional[str] = None,
+    file_name: Optional[str] = None,
+    start_date: Optional[str] = None,
+    end_date: Optional[str] = None,
+) -> str:
+    """
+    Builds a readable context summary for printing headers.
+    Example outputs:
+    - "HUC 12090301"
+    - "HUC 12090301, date '2017-08-30'"
+    - "HUC 12090301, range 2017-08-30 to 2017-09-01"
+    - "HUC 12090301, file 'PSS_3_0m_20170830T162251_BM.tif'"
+    """
+    parts = []
+    if huc8:
+        parts.append(f"HUC {huc8}")
+    if date_input:
+        parts.append(f"date '{date_input}'")
+    if start_date or end_date:
+        if start_date and end_date:
+            parts.append(f"range {start_date} to {end_date}")
+        elif start_date:
+            parts.append(f"from {start_date}")
+        elif end_date:
+            parts.append(f"until {end_date}")
+    if file_name:
+        parts.append(f"file '{file_name}'")
+
+    return ", ".join(parts) if parts else "your filters"
+
+
+def format_records_for_print(
+    records: List[Dict[str, Any]], context: Optional[str] = None
+) -> str:
+    if not records:
+        ctx = context or "your filters"
+        return f"Benchmark FIMs were not matched for {ctx}."
+
+    header = (
+        f"Following are the available benchmark data for {context}:\n"
+        if context
+        else ""
+    )
+
+    def _is_synthetic_tier_local(r: Dict[str, Any]) -> bool:
+        t = str(r.get("tier") or r.get("quality") or "").lower()
+        return "tier_4" in t or t.strip() == "4"
+
+    def _return_period_text_local(r: Dict[str, Any]) -> str:
+        rp = (
+            r.get("return_period")
+            or r.get("return_period_yr")
+            or r.get("rp")
+            or r.get("rp_years")
+        )
+        if rp is None:
+            return "synthetic flow (return period unknown)"
+        try:
+            rp_int = int(float(str(rp).strip().replace("yr", "").replace("-year", "")))
+            return f"{rp_int}-year synthetic flow"
+        except Exception:
+            return f"{rp} synthetic flow"
+
+    blocks: List[str] = []
+    for r in records:
+        tier = r.get("tier") or r.get("quality") or "Unknown"
+        res = r.get("resolution_m")
+        res_txt = f"{res}m" if res is not None else "NA"
+        fname = r.get("file_name") or "NA"
+
+        # Build lines with Tier-aware event text
+        lines = [f"Data Tier: {tier}"]
+        if _is_synthetic_tier_local(r):
+            lines.append(f"Return Period: {_return_period_text_local(r)}")
+        else:
+            date_str = _pretty_date_for_print(r)
+            lines.append(f"Benchmark FIM date: {date_str}")
+
+        lines.extend([
+            f"Spatial Resolution: {res_txt}",
+            f"Benchmark FIM raster name in DB: {fname}",
+        ])
+        blocks.append("\n".join(lines))
+
+    return (header + "\n\n".join(blocks)).strip()
+
+# S3 and json catalog
+def load_catalog_core() -> Dict[str, Any]:
+    obj = _S3.get_object(Bucket=BUCKET, Key=CATALOG_KEY)
+    return json.loads(obj["Body"].read().decode("utf-8", "replace"))
+
+
+def _list_prefix(prefix: str) -> List[str]:
+    keys: List[str] = []
+    paginator = _S3.get_paginator("list_objects_v2")
+    for page in paginator.paginate(Bucket=BUCKET, Prefix=prefix):
+        for obj in page.get("Contents", []) or []:
+            keys.append(obj["Key"])
+    return keys
+
+
+def _download(bucket: str, key: str, dest_path: str) -> str:
+    os.makedirs(os.path.dirname(dest_path), exist_ok=True)
+    _S3.download_file(bucket, key, dest_path)
+    return dest_path
+
+# Get the files from s3 bucket
+def _folder_from_record(rec: Dict[str, Any]) -> str:
+    s3_key = rec.get("s3_key")
+    if not s3_key or "/" not in s3_key:
+        raise ValueError("Record lacks s3_key to derive folder")
+    return s3_key.rsplit("/", 1)[0] + "/"
+
+
+def _tif_key_from_record(rec: Dict[str, Any]) -> Optional[str]:
+    tif_url = rec.get("tif_url")
+    if isinstance(tif_url, str) and ".amazonaws.com/" in tif_url:
+        return tif_url.split(".amazonaws.com/", 1)[1]
+    fname = rec.get("file_name")
+    if not fname:
+        return None
+    return _folder_from_record(rec) + fname
+
+# Download that tif and the boundary file --> need to add building footprint automation as well.
+def download_fim_assets(record: Dict[str, Any], dest_dir: str) -> Dict[str, Any]:
+    """
+    Download the .tif (if present) and any .gpkg from the record's folder to dest_dir.
+    """
+    os.makedirs(dest_dir, exist_ok=True)
+    out = {"tif": None, "gpkg_files": []}
+
+    # TIF
+    tif_key = _tif_key_from_record(record)
+    if tif_key:
+        local = os.path.join(dest_dir, os.path.basename(tif_key))
+        if not os.path.exists(local):
+            _download(BUCKET, tif_key, local)
+        out["tif"] = local
+
+    # GPKGs (list folder)
+    folder = _folder_from_record(record)
+    for key in _list_prefix(folder):
+        if key.lower().endswith(".gpkg"):
+            local = os.path.join(dest_dir, os.path.basename(key))
+            if not os.path.exists(local):
+                _download(BUCKET, key, local)
+            out["gpkg_files"].append(local)
+
+    return out
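For orientation, a minimal usage sketch of the new BenchFIMQuery helpers above. This is not part of the package's documented API, and the "records" key on the catalog dict is an assumption about the catalog_core.json schema:

from fimeval.BenchFIMQuery.utilis import (
    load_catalog_core,
    format_records_for_print,
    download_fim_assets,
)

catalog = load_catalog_core()  # fetches catalog_core.json from the public sdmlab bucket
records = catalog.get("records", [])  # assumed top-level key; adjust to the actual schema
print(format_records_for_print(records, context="HUC 12090301"))

if records:
    # downloads the record's .tif plus any .gpkg files from the same S3 folder
    assets = download_fim_assets(records[0], dest_dir="./benchFIM_assets")
    print(assets["tif"], assets["gpkg_files"])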
@@ -8,6 +8,7 @@ from plotly.subplots import make_subplots
 import plotly.graph_objects as go
 import seaborn as sns
 import matplotlib.pyplot as plt
+import matplotlib.gridspec as gridspec
 
 
 def Changeintogpkg(input_path, output_dir, layer_name):
@@ -21,8 +22,8 @@ def Changeintogpkg(input_path, output_dir, layer_name):
     output_gpkg = os.path.join(output_dir, f"{layer_name}.gpkg")
     gdf.to_file(output_gpkg, driver="GPKG")
     return output_gpkg
-
-
+
+
 def GetFloodedBuildingCountInfo(
     building_fp_path,
     study_area_path,
@@ -76,8 +77,12 @@ def GetFloodedBuildingCountInfo(
 
     count_centroids_in_contingency(contingency_map)
 
-    centroid_counts["Candidate"] =
-
+    centroid_counts["Candidate"] = (
+        centroid_counts["True Positive"] + centroid_counts["False Positive"]
+    )
+    centroid_counts["Benchmark"] = (
+        centroid_counts["True Positive"] + centroid_counts["False Negative"]
+    )
 
     total_buildings = len(clipped_buildings)
     percentages = {
@@ -93,7 +98,9 @@ def GetFloodedBuildingCountInfo(
     FAR = FP / (TP + FP) if (TP + FP) > 0 else 0
     POD = TP / (TP + FN) if (TP + FN) > 0 else 0
     if centroid_counts["Benchmark"] > 0:
-        BDR = (
+        BDR = (
+            centroid_counts["Candidate"] - centroid_counts["Benchmark"]
+        ) / centroid_counts["Benchmark"]
     else:
         BDR = 0
 
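These two hunks make the building counts and the building-detection ratio explicit: Candidate = TP + FP (buildings flooded in the candidate FIM), Benchmark = TP + FN (buildings flooded in the benchmark FIM), and BDR = (Candidate - Benchmark) / Benchmark. A quick sanity check with illustrative numbers only:

# Illustrative numbers, not from the package, to check the formulas above.
centroid_counts = {"True Positive": 120, "False Positive": 30, "False Negative": 20}
candidate = centroid_counts["True Positive"] + centroid_counts["False Positive"]  # 150
benchmark = centroid_counts["True Positive"] + centroid_counts["False Negative"]  # 140
BDR = (candidate - benchmark) / benchmark  # 10 / 140 ≈ 0.071: the candidate FIM floods ~7% more buildings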
@@ -122,7 +129,9 @@ def GetFloodedBuildingCountInfo(
         ],
     }
     counts_df = pd.DataFrame(counts_data)
-    csv_file_path = os.path.join(
+    csv_file_path = os.path.join(
+        save_dir, "EvaluationMetrics", f"BuildingCounts_{basename}.csv"
+    )
     os.makedirs(os.path.dirname(csv_file_path), exist_ok=True)
     counts_df.to_csv(csv_file_path, index=False)
 
@@ -135,98 +144,146 @@ def GetFloodedBuildingCountInfo(
     ]
 
     fig = make_subplots(
-        rows=1,
-
+        rows=1,
+        cols=2,
+        subplot_titles=(
+            "Building Counts on Different FIMs",
+            "Contingency Flooded Building Counts",
+        ),
     )
 
     fig.add_trace(
         go.Bar(
-            x=["Candidate"],
-
-
+            x=["Candidate"],
+            y=[centroid_counts["Candidate"]],
+            text=[f"{centroid_counts['Candidate']}"],
+            textposition="auto",
+            marker_color="#1c83eb",
+            marker_line_color="black",
+            marker_line_width=1,
             name=f"Candidate ({percentages['Candidate']:.2f}%)",
         ),
-        row=1,
+        row=1,
+        col=1,
     )
     fig.add_trace(
         go.Bar(
-            x=["Benchmark"],
-
-
+            x=["Benchmark"],
+            y=[centroid_counts["Benchmark"]],
+            text=[f"{centroid_counts['Benchmark']}"],
+            textposition="auto",
+            marker_color="#a4490e",
+            marker_line_color="black",
+            marker_line_width=1,
             name=f"Benchmark ({percentages['Benchmark']:.2f}%)",
         ),
-        row=1,
+        row=1,
+        col=1,
     )
 
     for i, label in enumerate(third_raster_labels):
         fig.add_trace(
             go.Bar(
-                x=[label],
-
+                x=[label],
+                y=[third_raster_counts[i]],
+                text=[f"{third_raster_counts[i]}"],
+                textposition="auto",
                 marker_color=["#ff5733", "#ffc300", "#28a745"][i],
-                marker_line_color="black",
+                marker_line_color="black",
+                marker_line_width=1,
                 name=f"{label} ({percentages[label]:.2f}%)",
             ),
-            row=1,
+            row=1,
+            col=2,
         )
 
     fig.update_layout(
         title="Flooded Building Counts",
         xaxis_title="Inundation Surface",
         yaxis_title="Flooded Building Counts",
-        width=1100,
-
-
+        width=1100,
+        height=400,
+        plot_bgcolor="rgba(0,0,0,0)",
+        paper_bgcolor="rgba(0,0,0,0)",
+        showlegend=True,
+        font=dict(family="Arial", size=18, color="black"),
     )
     fig.show()
 
-    # Seaborn for static PNG
-    df_left = pd.DataFrame(
-
-
-
-
-
-
+    # Seaborn for static PNG
+    df_left = pd.DataFrame(
+        {
+            "Category": ["Candidate", "Benchmark"],
+            "Count": [centroid_counts["Candidate"], centroid_counts["Benchmark"]],
+        }
+    )
+    df_right = pd.DataFrame(
+        {
+            "Category": third_raster_labels,
+            "Count": third_raster_counts,
+        }
+    )
 
     sns.set_theme(style="whitegrid")
-
+
+    fig_sb = plt.figure(figsize=(10, 3), constrained_layout=True)
+    gs = gridspec.GridSpec(1, 3, figure=fig_sb, width_ratios=[1, 1, 0.4])
+
+    ax0 = fig_sb.add_subplot(gs[0, 0])
+    ax1 = fig_sb.add_subplot(gs[0, 1])
+    ax_leg = fig_sb.add_subplot(gs[0, 2])
+    ax_leg.axis("off")
 
     def style_axes(ax, title_text, xlab, show_ylabel: bool):
-
-        ax.
-        ax.set_xlabel(xlab, fontsize=14, color="black")
+        ax.set_title(title_text, fontsize=14, pad=15)
+        ax.set_xlabel(xlab, fontsize=13, color="black")
         if show_ylabel:
-            ax.set_ylabel("Flooded Building Counts", fontsize=
+            ax.set_ylabel("Flooded Building Counts", fontsize=13, color="black")
         else:
            ax.set_ylabel("")
 
-        # Thicker black left/bottom spines
         for spine in ("left", "bottom"):
             ax.spines[spine].set_linewidth(1.5)
             ax.spines[spine].set_color("black")
 
         sns.despine(ax=ax, right=True, top=True)
-        ax.tick_params(axis="x", labelsize=
-        ax.tick_params(axis="y", labelsize=
+        ax.tick_params(axis="x", labelsize=11, colors="black")
+        ax.tick_params(axis="y", labelsize=11, colors="black")
 
     # Left panel
-
-    sns.barplot(data=df_left, x="Category", y="Count", ax=ax0,
-
-    style_axes(ax0, "Building Counts on Different FIMs", "Inundation Surface", show_ylabel=True)
+    colors_left = ["#1c83eb", "#a4490e"]
+    sns.barplot(data=df_left, x="Category", y="Count", ax=ax0, palette=colors_left)
+    style_axes(ax0, "Building Counts on Different FIMs", "Inundation Surface", True)
     for c in ax0.containers:
-        ax0.bar_label(
+        ax0.bar_label(
+            c, fmt="%.0f", label_type="edge", padding=3, fontsize=12, color="black"
+        )
 
     # Right panel
-
-    sns.barplot(data=df_right, x="Category", y="Count", ax=ax1,
-
-    style_axes(ax1, "Contingency Flooded Building Counts", "Category", show_ylabel=False)
+    colors_right = ["#ff5733", "#ffc300", "#28a745"]
+    sns.barplot(data=df_right, x="Category", y="Count", ax=ax1, palette=colors_right)
+    style_axes(ax1, "Contingency Flooded Building Counts", "Category", False)
     for c in ax1.containers:
-        ax1.bar_label(
+        ax1.bar_label(
+            c, fmt="%.0f", label_type="edge", padding=3, fontsize=12, color="black"
+        )
 
+    # Combined legend
+    all_labels = ["Candidate", "Benchmark"] + third_raster_labels
+    all_colors = colors_left + colors_right
+    legend_handles = [
+        plt.Line2D(
+            [0],
+            [0],
+            marker="s",
+            color="w",
+            markerfacecolor=all_colors[i],
+            markersize=12,
+            label=f"{all_labels[i]} ({percentages[all_labels[i]]:.2f}%)",
+        )
+        for i in range(len(all_labels))
+    ]
+    ax_leg.legend(handles=legend_handles, fontsize=12, loc="center left", frameon=True)
     plot_dir = os.path.join(save_dir, "FinalPlots")
     os.makedirs(plot_dir, exist_ok=True)
     output_path = os.path.join(plot_dir, f"BuildingCounts_{basename}.png")
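The new static figure uses a GridSpec layout whose third, narrow column exists only to hold a shared legend built from proxy handles. A standalone sketch of that pattern (colors and labels copied from the hunk above, data omitted):

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

fig = plt.figure(figsize=(10, 3), constrained_layout=True)
gs = gridspec.GridSpec(1, 3, figure=fig, width_ratios=[1, 1, 0.4])
ax0 = fig.add_subplot(gs[0, 0])   # left bar panel
ax1 = fig.add_subplot(gs[0, 1])   # right bar panel
ax_leg = fig.add_subplot(gs[0, 2])  # legend-only slot
ax_leg.axis("off")

# Proxy handles: square markers standing in for the bar colors.
handles = [
    plt.Line2D([0], [0], marker="s", color="w", markerfacecolor=c, markersize=12, label=l)
    for c, l in [("#1c83eb", "Candidate"), ("#a4490e", "Benchmark")]
]
ax_leg.legend(handles=handles, loc="center left", frameon=True)
plt.show()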
@@ -236,8 +293,6 @@ def GetFloodedBuildingCountInfo(
     print(f"PNG were saved in : {output_path}")
 
 
-
-
 def process_TIFF(
     tif_files, contingency_files, building_footprint, boundary, method_path
 ):
@@ -130,3 +130,5 @@ def BuildingFootprintwithISO(countryISO, ROI, out_dir, geeprojectID=None):
     getBuildingFootprintSpark(
         countryISO, ROI, out_dir, tile_size=0.05, projectID=geeprojectID
     )
+
+BuildingFootprintwithISO("USA", "/Users/supath/Downloads/S1A_9_6m_20190530T23573_910244W430506N_AOI.gpkg", "/Users/supath/Downloads/AOI", geeprojectID="supathdh")
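The added call runs at import time with the author's local paths. Anyone reusing it would substitute their own arguments; the values below are placeholders, not part of the package:

# Placeholder arguments: supply your own country ISO code, ROI geopackage,
# output directory, and Earth Engine project ID.
BuildingFootprintwithISO(
    "USA",
    "/path/to/AOI.gpkg",
    "/path/to/output_dir",
    geeprojectID="your-gee-project",
)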