fimeval-0.1.53-py3-none-any.whl → fimeval-0.1.55-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,6 +6,8 @@ import pandas as pd
  from pathlib import Path
  from plotly.subplots import make_subplots
  import plotly.graph_objects as go
+ import seaborn as sns
+ import matplotlib.pyplot as plt


  def Changeintogpkg(input_path, output_dir, layer_name):
@@ -19,8 +21,8 @@ def Changeintogpkg(input_path, output_dir, layer_name):
  output_gpkg = os.path.join(output_dir, f"{layer_name}.gpkg")
  gdf.to_file(output_gpkg, driver="GPKG")
  return output_gpkg
-
-
+
+
  def GetFloodedBuildingCountInfo(
  building_fp_path,
  study_area_path,
@@ -31,7 +33,6 @@ def GetFloodedBuildingCountInfo(
  basename,
  ):
  output_dir = os.path.dirname(building_fp_path)
-
  building_fp_gpkg = Changeintogpkg(
  building_fp_path, output_dir, "building_footprint"
  )
@@ -42,7 +43,6 @@ def GetFloodedBuildingCountInfo(
  with rasterio.open(raster1_path) as src:
  target_crs = str(src.crs)

- # Reproject all GeoDataFrames to the target CRS
  if building_gdf.crs != target_crs:
  building_gdf = building_gdf.to_crs(target_crs)
  print("reproject building_gdf")
@@ -55,43 +55,29 @@ def GetFloodedBuildingCountInfo(
  clipped_buildings["centroid"] = clipped_buildings.geometry.centroid

  centroid_counts = {
- "Benchmark": 0,
- "Candidate": 0,
  "False Positive": 0,
  "False Negative": 0,
  "True Positive": 0,
  }

- def count_centroids_in_raster(raster_path, label):
+ def count_centroids_in_contingency(raster_path):
  with rasterio.open(raster_path) as src:
  raster_data = src.read(1)
- transform = src.transform
-
  for centroid in clipped_buildings["centroid"]:
  row, col = src.index(centroid.x, centroid.y)
  if 0 <= row < raster_data.shape[0] and 0 <= col < raster_data.shape[1]:
  pixel_value = raster_data[row, col]
- if label in ["Benchmark", "Candidate"]:
- if pixel_value == 2: # False Positive
- centroid_counts[label] += 1
- else:
- if pixel_value == 2:
- centroid_counts["False Positive"] += 1
- elif pixel_value == 3:
- centroid_counts["False Negative"] += 1
- elif pixel_value == 4:
- centroid_counts["True Positive"] += 1
-
- if "bm" in str(raster1_path).lower():
- count_centroids_in_raster(raster1_path, "Benchmark")
- count_centroids_in_raster(raster2_path, "Candidate")
+ if pixel_value == 2:
+ centroid_counts["False Positive"] += 1
+ elif pixel_value == 3:
+ centroid_counts["False Negative"] += 1
+ elif pixel_value == 4:
+ centroid_counts["True Positive"] += 1

- elif "candidate" in str(raster2_path).lower():
- count_centroids_in_raster(raster1_path, "Candidate")
- count_centroids_in_raster(raster2_path, "Benchmark")
+ count_centroids_in_contingency(contingency_map)

- if "contingency" in str(contingency_map).lower():
- count_centroids_in_raster(contingency_map, "Contingency")
+ centroid_counts["Candidate"] = centroid_counts["True Positive"] + centroid_counts["False Positive"]
+ centroid_counts["Benchmark"] = centroid_counts["True Positive"] + centroid_counts["False Negative"]

  total_buildings = len(clipped_buildings)
  percentages = {
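The counting logic now reads only the contingency raster, where (per the replaced branch above) pixel value 2 marks a False Positive, 3 a False Negative, and 4 a True Positive; the Candidate and Benchmark totals are then derived rather than counted from separate rasters. A minimal sketch of that derivation (the counts below are illustrative, not package output):

    # Hypothetical per-category counts, for illustration only
    counts = {"True Positive": 40, "False Positive": 12, "False Negative": 5}
    # Buildings flooded in the candidate FIM = agreement + candidate-only detections
    counts["Candidate"] = counts["True Positive"] + counts["False Positive"]   # 52
    # Buildings flooded in the benchmark FIM = agreement + benchmark-only detections
    counts["Benchmark"] = counts["True Positive"] + counts["False Negative"]   # 45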
@@ -106,10 +92,10 @@ def GetFloodedBuildingCountInfo(
  CSI = TP / (TP + FP + FN) if (TP + FP + FN) > 0 else 0
  FAR = FP / (TP + FP) if (TP + FP) > 0 else 0
  POD = TP / (TP + FN) if (TP + FN) > 0 else 0
-
- BDR = (
- centroid_counts["Candidate"] - centroid_counts["Benchmark"]
- ) / centroid_counts["Benchmark"]
+ if centroid_counts["Benchmark"] > 0:
+ BDR = (centroid_counts["Candidate"] - centroid_counts["Benchmark"]) / centroid_counts["Benchmark"]
+ else:
+ BDR = 0

  counts_data = {
  "Category": [
@@ -135,13 +121,12 @@ def GetFloodedBuildingCountInfo(
  f"{BDR:.3f}",
  ],
  }
-
  counts_df = pd.DataFrame(counts_data)
- csv_file_path = os.path.join(
- save_dir, "EvaluationMetrics", f"BuildingCounts_{basename}.csv"
- )
+ csv_file_path = os.path.join(save_dir, "EvaluationMetrics", f"BuildingCounts_{basename}.csv")
+ os.makedirs(os.path.dirname(csv_file_path), exist_ok=True)
  counts_df.to_csv(csv_file_path, index=False)

+ # Plotly interactive visualization only
  third_raster_labels = ["False Positive", "False Negative", "True Positive"]
  third_raster_counts = [
  centroid_counts["False Positive"],
@@ -150,81 +135,109 @@ def GetFloodedBuildingCountInfo(
  ]

  fig = make_subplots(
- rows=1,
- cols=2,
- subplot_titles=(
- "Building Counts on Different FIMs",
- "Contingency Flooded Building Counts",
- ),
+ rows=1, cols=2,
+ subplot_titles=("Building Counts on Different FIMs", "Contingency Flooded Building Counts"),
  )

  fig.add_trace(
  go.Bar(
- x=["Candidate"],
- y=[centroid_counts["Candidate"]],
- text=[f"{centroid_counts['Candidate']}"],
- textposition="auto",
- marker_color="#1c83eb",
- marker_line_color="black",
- marker_line_width=1,
+ x=["Candidate"], y=[centroid_counts["Candidate"]],
+ text=[f"{centroid_counts['Candidate']}"], textposition="auto",
+ marker_color="#1c83eb", marker_line_color="black", marker_line_width=1,
  name=f"Candidate ({percentages['Candidate']:.2f}%)",
  ),
- row=1,
- col=1,
+ row=1, col=1,
  )
-
  fig.add_trace(
  go.Bar(
- x=["Benchmark"],
- y=[centroid_counts["Benchmark"]],
- text=[f"{centroid_counts['Benchmark']}"],
- textposition="auto",
- marker_color="#a4490e",
- marker_line_color="black",
- marker_line_width=1,
+ x=["Benchmark"], y=[centroid_counts["Benchmark"]],
+ text=[f"{centroid_counts['Benchmark']}"], textposition="auto",
+ marker_color="#a4490e", marker_line_color="black", marker_line_width=1,
  name=f"Benchmark ({percentages['Benchmark']:.2f}%)",
  ),
- row=1,
- col=1,
+ row=1, col=1,
  )

- for i in range(len(third_raster_labels)):
+ for i, label in enumerate(third_raster_labels):
  fig.add_trace(
  go.Bar(
- x=[third_raster_labels[i]],
- y=[third_raster_counts[i]],
- text=[f"{third_raster_counts[i]}"],
- textposition="auto",
+ x=[label], y=[third_raster_counts[i]],
+ text=[f"{third_raster_counts[i]}"], textposition="auto",
  marker_color=["#ff5733", "#ffc300", "#28a745"][i],
- marker_line_color="black",
- marker_line_width=1,
- name=f"{third_raster_labels[i]} ({percentages[third_raster_labels[i]]:.2f}%)",
+ marker_line_color="black", marker_line_width=1,
+ name=f"{label} ({percentages[label]:.2f}%)",
  ),
- row=1,
- col=2,
+ row=1, col=2,
  )

  fig.update_layout(
  title="Flooded Building Counts",
  xaxis_title="Inundation Surface",
  yaxis_title="Flooded Building Counts",
- width=1100,
- height=400,
- plot_bgcolor="rgba(0, 0, 0, 0)",
- paper_bgcolor="rgba(0, 0, 0, 0)",
- showlegend=True,
- font=dict(family="Arial", size=18, color="black"),
+ width=1100, height=400,
+ plot_bgcolor="rgba(0,0,0,0)", paper_bgcolor="rgba(0,0,0,0)",
+ showlegend=True, font=dict(family="Arial", size=18, color="black"),
  )
+ fig.show()
+
+ # Seaborn for static PNG saving only
+ df_left = pd.DataFrame({
+ "Category": ["Candidate", "Benchmark"],
+ "Count": [centroid_counts["Candidate"], centroid_counts["Benchmark"]],
+ })
+ df_right = pd.DataFrame({
+ "Category": third_raster_labels,
+ "Count": third_raster_counts,
+ })
+
+ sns.set_theme(style="whitegrid")
+ fig_sb, axes = plt.subplots(1, 2, figsize=(8, 3), constrained_layout=True)
+
+ def style_axes(ax, title_text, xlab, show_ylabel: bool):
+ # Adding a bit of padding so bar labels don’t overlap with the title
+ ax.set_title(title_text, fontsize=16, pad=20)
+ ax.set_xlabel(xlab, fontsize=14, color="black")
+ if show_ylabel:
+ ax.set_ylabel("Flooded Building Counts", fontsize=14, color="black")
+ else:
+ ax.set_ylabel("")
+
+ # Thicker black left/bottom spines
+ for spine in ("left", "bottom"):
+ ax.spines[spine].set_linewidth(1.5)
+ ax.spines[spine].set_color("black")
+
+ sns.despine(ax=ax, right=True, top=True)
+ ax.tick_params(axis="x", labelsize=12, colors="black")
+ ax.tick_params(axis="y", labelsize=12, colors="black")
+
+ # Left panel
+ ax0 = axes[0]
+ sns.barplot(data=df_left, x="Category", y="Count", ax=ax0,
+ palette=["#1c83eb", "#a4490e"])
+ style_axes(ax0, "Building Counts on Different FIMs", "Inundation Surface", show_ylabel=True)
+ for c in ax0.containers:
+ ax0.bar_label(c, fmt="%.0f", label_type="edge", padding=3, fontsize=14, color="black")
+
+ # Right panel
+ ax1 = axes[1]
+ sns.barplot(data=df_right, x="Category", y="Count", ax=ax1,
+ palette=["#ff5733", "#ffc300", "#28a745"])
+ style_axes(ax1, "Contingency Flooded Building Counts", "Category", show_ylabel=False)
+ for c in ax1.containers:
+ ax1.bar_label(c, fmt="%.0f", label_type="edge", padding=3, fontsize=14, color="black")

  plot_dir = os.path.join(save_dir, "FinalPlots")
- if not os.path.exists(plot_dir):
- os.makedirs(plot_dir)
+ os.makedirs(plot_dir, exist_ok=True)
  output_path = os.path.join(plot_dir, f"BuildingCounts_{basename}.png")
- fig.write_image(output_path, scale=500 / 96, engine="kaleido")
- print(f"Performance metrics chart is saved as PNG at {output_path}")
- fig.show()
+ fig_sb.savefig(output_path, dpi=400)
+ plt.close(fig_sb)
+
+ print(f"PNG were saved in : {output_path}")


+
+
  def process_TIFF(
  tif_files, contingency_files, building_footprint, boundary, method_path
  ):
@@ -290,15 +303,19 @@ def detect_shapefile(folder):
  def ensure_pyspark(version: str | None = "3.5.4") -> None:
  """Install pyspark at runtime via `uv pip` into this env (no-op if present)."""
  import importlib, shutil, subprocess, sys, re
+
  try:
  import importlib.util
+
  if importlib.util.find_spec("pyspark"):
  return
  except Exception:
  pass
  uv = shutil.which("uv")
  if not uv:
- raise RuntimeError("`uv` not found on PATH. Please install uv or add it to PATH.")
+ raise RuntimeError(
+ "`uv` not found on PATH. Please install uv or add it to PATH."
+ )
  if version is None:
  spec = "pyspark"
  else:
@@ -307,7 +324,6 @@ def ensure_pyspark(version: str | None = "3.5.4") -> None:
  subprocess.check_call([uv, "pip", "install", "--python", sys.executable, spec])


-
  def EvaluationWithBuildingFootprint(
  main_dir,
  method_name,
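For context, the reformatted `ensure_pyspark` helper installs pyspark into the running interpreter's environment via `uv pip` only when the import spec is missing. A hedged usage sketch (assuming the function is imported from this module; the pinned spec for the default version is presumably built in the `else` branch that falls outside these hunks):

    ensure_pyspark()              # default version "3.5.4", presumably pinned as pyspark==3.5.4
    ensure_pyspark(version=None)  # installs an unpinned "pyspark" instead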
@@ -338,11 +354,11 @@ def EvaluationWithBuildingFootprint(
  boundary = detect_shapefile(main_dir)

  building_footprintMS = building_footprint
-
+
  if building_footprintMS is None:
  ensure_pyspark()
  from .microsoftBF import BuildingFootprintwithISO
-
+
  out_dir = os.path.join(method_path, "BuildingFootprint")
  if not os.path.exists(out_dir):
  os.makedirs(out_dir)
@@ -398,11 +414,11 @@ def EvaluationWithBuildingFootprint(
  boundary = detect_shapefile(os.path.join(main_dir, folder))

  building_footprintMS = building_footprint
-
+
  if building_footprintMS is None:
  ensure_pyspark()
  from .microsoftBF import BuildingFootprintwithISO
-
+
  out_dir = os.path.join(method_path, "BuildingFootprint")
  if not os.path.exists(out_dir):
  os.makedirs(out_dir)
@@ -18,6 +18,7 @@ warnings.filterwarnings("ignore")
  # Authenticate and initialize Earth Engine
  ee.Authenticate()

+
  # %%
  def split_into_tiles(boundary, tile_size=0.1):
  bounds = boundary.total_bounds
@@ -31,24 +31,35 @@ def is_writable(path):

  def fix_permissions(path):
  path = Path(path).resolve()
- script_path = Path(__file__).parent / "fix_permissions.sh"
-
- if not script_path.exists():
- raise FileNotFoundError(f"Shell script not found: {script_path}")

  if is_writable(path):
  return

+ uname = platform.system()
+
  try:
- result = subprocess.run(
- ["bash", str(script_path), str(path)],
- check=True,
- capture_output=True,
- text=True,
- )
- print(result.stdout)
+ if uname in ["Darwin", "Linux"]:
+ subprocess.run(
+ ["chmod", "-R", "u+rwX", str(path)],
+ check=True,
+ capture_output=True,
+ text=True,
+ )
+ print(f"Permissions granted for user (u+rwX): {path}")
+
+ elif "MINGW" in uname or "MSYS" in uname or "CYGWIN" in uname:
+ subprocess.run(
+ ["icacls", str(path), "/grant", "Everyone:F", "/T"],
+ check=True,
+ capture_output=True,
+ text=True,
+ )
+ print(f"Permissions granted for working folder: {path}")
+
+ else:
+ print(f"Unsupported OS: {uname}")
  except subprocess.CalledProcessError as e:
- print(f"Shell script failed:\n{e.stderr}")
+ print(f"Failed to fix permissions for {path}:\n{e.stderr}")


  # Function for the evalution of the model
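The rewritten `fix_permissions` returns early when `is_writable(path)` already succeeds and otherwise shells out to `chmod` or `icacls` depending on `platform.system()`. The body of `is_writable` is not part of this hunk, so the following is only a plausible sketch of its shape (an assumption, not the package's code); the new branch also presumes `platform` is imported at module level:

    import os

    def is_writable(path):
        # Assumed implementation: the path counts as writable if os.access
        # grants the current user write permission on it.
        return os.access(path, os.W_OK)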
@@ -105,7 +116,7 @@ def evaluateFIM(
  bounding_geom = AOI(benchmark_path, shapefile, save_dir)

  else:
- print(f"--- {method.__name__} is processing ---")
+ print(f"**{method.__name__} is processing**")
  bounding_geom = method(smallest_raster_path, save_dir=save_dir)

  # Read and process benchmark raster
@@ -395,7 +406,6 @@ def EvaluateFIM(
  gdf = gpd.read_file(PWB_dir)

  # Grant the permission to the main directory
- print(f"Fixing permissions for {main_dir}...")
  fix_permissions(main_dir)

  # runt the process
@@ -414,12 +424,11 @@ def EvaluateFIM(
  for tif_file in tif_files:
  if "benchmark" in tif_file.name.lower() or "BM" in tif_file.name:
  benchmark_path = tif_file
- print(f"---Benchmark: {tif_file.name}---")
  else:
  candidate_path.append(tif_file)

  if benchmark_path and candidate_path:
- print(f"---Flood Inundation Evaluation of {folder_dir.name}---")
+ print(f"**Flood Inundation Evaluation of {folder_dir.name}**")
  Metrics = evaluateFIM(
  benchmark_path,
  candidate_path,
@@ -2,75 +2,108 @@ import os
  import glob
  import pandas as pd
  import plotly.express as px
+ import seaborn as sns
+ import matplotlib.pyplot as plt


  # Function to plot individual metric scores
  def PlotMetrics(csv_path, method_path):
  metrics_df = pd.read_csv(csv_path)
- # Extract relevant metrics
+
+ # Keep only the desired metrics
  metrics = metrics_df.loc[
- metrics_df["Metrics"].isin(
- ["CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"]
- )
+ metrics_df["Metrics"].isin([
+ "CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"
+ ])
  ].copy()

- metrics.loc[:, "Metrics"] = metrics["Metrics"].replace(
- {
- "CSI_values": "CSI",
- "POD_values": "POD",
- "Acc_values": "Accuracy",
- "Prec_values": "Precision",
- "F1_values": "F1 Score",
- }
- )
+ # Rename for presentation
+ metrics["Metrics"] = metrics["Metrics"].replace({
+ "CSI_values": "CSI",
+ "POD_values": "POD",
+ "Acc_values": "Accuracy",
+ "Prec_values": "Precision",
+ "F1_values": "F1 Score",
+ })
+
  value_columns = metrics.select_dtypes(include="number").columns

+ # Output directory
+ plot_dir = os.path.join(method_path, "FinalPlots")
+ os.makedirs(plot_dir, exist_ok=True)
+
  for value_column in value_columns:
  metrics[value_column] = metrics[value_column].round(2)

- # Create the bar plot
- fig = px.bar(
+ # Showing with Plotly
+ fig_plotly = px.bar(
  metrics,
  x=value_column,
  y="Metrics",
- title=f"Performance Metrics",
- labels={value_column: "Score"},
  text=value_column,
  color="Metrics",
+ orientation="h",
  color_discrete_sequence=px.colors.qualitative.Set2,
+ title=f"Performance Metrics"
  )
- fig.update_traces(texttemplate="%{text:.2f}", textposition="outside")
- fig.update_layout(
- yaxis_title="Metrics",
- xaxis_title="Score",
+ fig_plotly.update_traces(texttemplate="%{text:.2f}", textposition="outside")
+ fig_plotly.update_layout(
  showlegend=False,
- plot_bgcolor="rgba(0, 0, 0, 0)",
- paper_bgcolor="rgba(0, 0, 0, 0)",
- margin=dict(l=10, r=10, t=40, b=10),
- xaxis=dict(showline=True, linewidth=2, linecolor="black"),
- yaxis=dict(showline=True, linewidth=2, linecolor="black"),
+ plot_bgcolor="rgba(0,0,0,0)",
+ paper_bgcolor="rgba(0,0,0,0)",
  height=350,
  width=900,
+ xaxis=dict(showline=True, linewidth=2, linecolor="black", title="Score"),
+ yaxis=dict(showline=True, linewidth=2, linecolor="black"),
  title_font=dict(family="Arial", size=24, color="black"),
- xaxis_title_font=dict(family="Arial", size=20, color="black"),
- yaxis_title_font=dict(family="Arial", size=20, color="black"),
  font=dict(family="Arial", size=18, color="black"),
  )
+ fig_plotly.show()

- # Save each plot as a PNG, using the column name as the filename
- plot_dir = os.path.join(method_path, "FinalPlots")
- if not os.path.exists(plot_dir):
- os.makedirs(plot_dir)
+ # Save with Seaborn
+ sns.set_theme(style="whitegrid")
+ fig, ax = plt.subplots(figsize=(8, 3))
+ sns.barplot(
+ data=metrics,
+ x=value_column,
+ y="Metrics",
+ hue="Metrics",
+ palette="Set2",
+ ax=ax,
+ dodge=False,
+ legend=False
+ )

- output_filename = f"EvaluationMetrics_{value_column}.png"
- output_path = os.path.join(plot_dir, output_filename)
+ # Annotate bars
+ for container in ax.containers:
+ ax.bar_label(container, fmt='%.2f', label_type='edge', fontsize=14)

- # Save the plot as PNG
- fig.write_image(output_path, engine="kaleido", scale=500 / 96)
- print(
- f"Performance metrics chart ({value_column}) saved as PNG at {output_path}"
- )
- fig.show()
+ # Styling
+ ax.set_title("Performance Metrics", fontsize=16)
+ ax.set_xlabel("Score", fontsize=16, color="black") # just bigger, not bold
+ ax.set_ylabel("Metrics", fontsize=16, color="black")
+
+ ax.set_xticks([i/10 for i in range(0, 11, 2)])
+ ax.set_xticklabels([f"{i/10:.1f}" for i in range(0, 11, 2)], fontsize=14, color="black")
+
+ # Increase y-tick label font size
+ ax.tick_params(axis="y", labelsize=12, colors="black")
+ ax.tick_params(axis="x", labelsize=14, colors="black")
+
+ # Force spines black + thicker
+ ax.spines['left'].set_linewidth(1.5)
+ ax.spines['bottom'].set_linewidth(1.5)
+ ax.spines['left'].set_color("black")
+ ax.spines['bottom'].set_color("black")
+
+ sns.despine(right=True, top=True)
+
+ # Save to file
+ save_path = os.path.join(plot_dir, f"EvaluationMetrics_{value_column}.png")
+ plt.tight_layout()
+ fig.savefig(save_path, dpi=400)
+ plt.close(fig)
+ print(f"PNG saved at: {save_path}")


  def PlotEvaluationMetrics(main_dir, method_name, out_dir):
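`PlotMetrics` keeps the rows whose `Metrics` value is one of the `*_values` names and then plots every numeric column it finds, so the input CSV is expected to look roughly like the sketch below (the `Candidate_FIM` column name and the scores are hypothetical, used only to show the shape):

    import pandas as pd

    # Hypothetical metrics table of the form PlotMetrics consumes
    example = pd.DataFrame({
        "Metrics": ["CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"],
        "Candidate_FIM": [0.71, 0.83, 0.94, 0.88, 0.85],  # illustrative scores
    })
    # Each numeric column would yield its own FinalPlots/EvaluationMetrics_<column>.png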
fimeval/utilis.py CHANGED
@@ -71,7 +71,6 @@ def reprojectFIMs(src_path, dst_path, target_crs):
  resampling=Resampling.nearest,
  )
  else:
- print(f"Source raster is already in {target_crs}. No reprojection needed.")
  shutil.copy(src_path, dst_path)
  compress_tif_lzw(dst_path)

@@ -79,9 +78,7 @@ def reprojectFIMs(src_path, dst_path, target_crs):
  # Resample into the coarser resoution amoung all FIMS within the case
  def resample_to_resolution(src_path, x_resolution, y_resolution):
  src_path = Path(src_path)
- print(src_path)
  temp_path = src_path.with_name(src_path.stem + "_resampled.tif")
- print(temp_path)

  with rasterio.open(src_path) as src:
  transform = rasterio.transform.from_origin(
@@ -144,14 +141,13 @@ def MakeFIMsUniform(fim_dir, target_crs=None, target_resolution=None):
  if not final_crs:
  if all(is_within_conus(b, c) for b, c in zip(bounds_list, crs_list)):
  final_crs = "EPSG:5070"
- print(f"Defaulting to CONUS CRS: {final_crs}")
+ print(f"Defaulting to CONUS CRS: {final_crs}, Reprojecting.")
  else:
  print(
  "Mixed or non-CONUS CRS detected. Please provide a valid target CRS."
  )
  return

- print(f"Reprojecting all rasters to {final_crs}")
  for src_path in tif_files:
  dst_path = processing_folder / src_path.name
  reprojectFIMs(str(src_path), str(dst_path), final_crs)