fimeval 0.1.54__tar.gz → 0.1.56__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. {fimeval-0.1.54 → fimeval-0.1.56}/PKG-INFO +2 -1
  2. {fimeval-0.1.54 → fimeval-0.1.56}/pyproject.toml +3 -2
  3. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/BuildingFootprint/evaluationwithBF.py +101 -23
  4. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/ContingencyMap/evaluationFIM.py +4 -5
  5. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/ContingencyMap/plotevaluationmetrics.py +65 -28
  6. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/utilis.py +1 -5
  7. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval.egg-info/PKG-INFO +2 -1
  8. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval.egg-info/requires.txt +1 -0
  9. {fimeval-0.1.54 → fimeval-0.1.56}/tests/test_evaluationfim.py +1 -1
  10. {fimeval-0.1.54 → fimeval-0.1.56}/LICENSE.txt +0 -0
  11. {fimeval-0.1.54 → fimeval-0.1.56}/README.md +0 -0
  12. {fimeval-0.1.54 → fimeval-0.1.56}/setup.cfg +0 -0
  13. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/BuildingFootprint/__init__.py +0 -0
  14. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/BuildingFootprint/microsoftBF.py +0 -0
  15. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/ContingencyMap/PWBs3.py +0 -0
  16. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/ContingencyMap/__init__.py +0 -0
  17. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/ContingencyMap/methods.py +0 -0
  18. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/ContingencyMap/metrics.py +0 -0
  19. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/ContingencyMap/printcontingency.py +0 -0
  20. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval/__init__.py +0 -0
  21. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval.egg-info/SOURCES.txt +0 -0
  22. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval.egg-info/dependency_links.txt +0 -0
  23. {fimeval-0.1.54 → fimeval-0.1.56}/src/fimeval.egg-info/top_level.txt +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fimeval
-Version: 0.1.54
+Version: 0.1.56
 Summary: A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation
 Author: Surface Dynamics Modeling Lab
 Author-email: Supath Dhital <sdhital@crimson.ua.edu>, Dipshika Devi <ddevi@ua.edu>
@@ -683,6 +683,7 @@ Requires-Dist: notebook<8.0.0,>=7.3.2
 Requires-Dist: boto3<2.0.0,>=1.36.16
 Requires-Dist: geemap
 Requires-Dist: uv
+Requires-Dist: seaborn
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: black; extra == "dev"

pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "fimeval"
-version = "0.1.54"
+version = "0.1.56"
 description = "A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation"
 readme = "README.md"
 requires-python = ">=3.10"
@@ -30,7 +30,8 @@ dependencies = [
     "notebook>=7.3.2,<8.0.0",
     "boto3>=1.36.16,<2.0.0",
     "geemap",
-    "uv"
+    "uv",
+    "seaborn"
 ]
 
 [project.optional-dependencies]

src/fimeval/BuildingFootprint/evaluationwithBF.py
@@ -6,6 +6,9 @@ import pandas as pd
 from pathlib import Path
 from plotly.subplots import make_subplots
 import plotly.graph_objects as go
+import seaborn as sns
+import matplotlib.pyplot as plt
+import matplotlib.gridspec as gridspec
 
 
 def Changeintogpkg(input_path, output_dir, layer_name):
@@ -20,6 +23,7 @@ def Changeintogpkg(input_path, output_dir, layer_name):
     gdf.to_file(output_gpkg, driver="GPKG")
     return output_gpkg
 
+
 def GetFloodedBuildingCountInfo(
     building_fp_path,
     study_area_path,
@@ -57,7 +61,6 @@ def GetFloodedBuildingCountInfo(
         "True Positive": 0,
     }
 
-    # Count centroids in the contingency map
     def count_centroids_in_contingency(raster_path):
         with rasterio.open(raster_path) as src:
             raster_data = src.read(1)
@@ -74,10 +77,12 @@ def GetFloodedBuildingCountInfo(
 
     count_centroids_in_contingency(contingency_map)
 
-    # Calculate Candidate and Benchmark counts from the contingency map counts
-    centroid_counts["Candidate"] = centroid_counts["True Positive"] + centroid_counts["False Positive"]
-    centroid_counts["Benchmark"] = centroid_counts["True Positive"] + centroid_counts["False Negative"]
-
+    centroid_counts["Candidate"] = (
+        centroid_counts["True Positive"] + centroid_counts["False Positive"]
+    )
+    centroid_counts["Benchmark"] = (
+        centroid_counts["True Positive"] + centroid_counts["False Negative"]
+    )
 
     total_buildings = len(clipped_buildings)
     percentages = {
@@ -92,15 +97,13 @@ def GetFloodedBuildingCountInfo(
     CSI = TP / (TP + FP + FN) if (TP + FP + FN) > 0 else 0
     FAR = FP / (TP + FP) if (TP + FP) > 0 else 0
     POD = TP / (TP + FN) if (TP + FN) > 0 else 0
-
     if centroid_counts["Benchmark"] > 0:
         BDR = (
-            (centroid_counts["Candidate"] - centroid_counts["Benchmark"])
-            / centroid_counts["Benchmark"]
-        )
+            centroid_counts["Candidate"] - centroid_counts["Benchmark"]
+        ) / centroid_counts["Benchmark"]
     else:
-        BDR = 0
-
+        BDR = 0
+
     counts_data = {
         "Category": [
            "Candidate",
@@ -125,13 +128,14 @@ def GetFloodedBuildingCountInfo(
            f"{BDR:.3f}",
        ],
    }
-
    counts_df = pd.DataFrame(counts_data)
    csv_file_path = os.path.join(
        save_dir, "EvaluationMetrics", f"BuildingCounts_{basename}.csv"
    )
+   os.makedirs(os.path.dirname(csv_file_path), exist_ok=True)
    counts_df.to_csv(csv_file_path, index=False)
 
+   # Plotly interactive visualization only
    third_raster_labels = ["False Positive", "False Negative", "True Positive"]
    third_raster_counts = [
        centroid_counts["False Positive"],
@@ -162,7 +166,6 @@ def GetFloodedBuildingCountInfo(
        row=1,
        col=1,
    )
-
    fig.add_trace(
        go.Bar(
            x=["Benchmark"],
@@ -178,17 +181,17 @@ def GetFloodedBuildingCountInfo(
        col=1,
    )
 
-   for i in range(len(third_raster_labels)):
+   for i, label in enumerate(third_raster_labels):
        fig.add_trace(
            go.Bar(
-               x=[third_raster_labels[i]],
+               x=[label],
                y=[third_raster_counts[i]],
                text=[f"{third_raster_counts[i]}"],
                textposition="auto",
                marker_color=["#ff5733", "#ffc300", "#28a745"][i],
                marker_line_color="black",
                marker_line_width=1,
-               name=f"{third_raster_labels[i]} ({percentages[third_raster_labels[i]]:.2f}%)",
+               name=f"{label} ({percentages[label]:.2f}%)",
            ),
            row=1,
            col=2,
@@ -200,19 +203,94 @@ def GetFloodedBuildingCountInfo(
        yaxis_title="Flooded Building Counts",
        width=1100,
        height=400,
-       plot_bgcolor="rgba(0, 0, 0, 0)",
-       paper_bgcolor="rgba(0, 0, 0, 0)",
+       plot_bgcolor="rgba(0,0,0,0)",
+       paper_bgcolor="rgba(0,0,0,0)",
        showlegend=True,
        font=dict(family="Arial", size=18, color="black"),
    )
+   fig.show()
+
+   # Seaborn for static PNG
+   df_left = pd.DataFrame(
+       {
+           "Category": ["Candidate", "Benchmark"],
+           "Count": [centroid_counts["Candidate"], centroid_counts["Benchmark"]],
+       }
+   )
+   df_right = pd.DataFrame(
+       {
+           "Category": third_raster_labels,
+           "Count": third_raster_counts,
+       }
+   )
 
+   sns.set_theme(style="whitegrid")
+
+   fig_sb = plt.figure(figsize=(10, 3), constrained_layout=True)
+   gs = gridspec.GridSpec(1, 3, figure=fig_sb, width_ratios=[1, 1, 0.4])
+
+   ax0 = fig_sb.add_subplot(gs[0, 0])
+   ax1 = fig_sb.add_subplot(gs[0, 1])
+   ax_leg = fig_sb.add_subplot(gs[0, 2])
+   ax_leg.axis("off")
+
+   def style_axes(ax, title_text, xlab, show_ylabel: bool):
+       ax.set_title(title_text, fontsize=14, pad=15)
+       ax.set_xlabel(xlab, fontsize=13, color="black")
+       if show_ylabel:
+           ax.set_ylabel("Flooded Building Counts", fontsize=13, color="black")
+       else:
+           ax.set_ylabel("")
+
+       for spine in ("left", "bottom"):
+           ax.spines[spine].set_linewidth(1.5)
+           ax.spines[spine].set_color("black")
+
+       sns.despine(ax=ax, right=True, top=True)
+       ax.tick_params(axis="x", labelsize=11, colors="black")
+       ax.tick_params(axis="y", labelsize=11, colors="black")
+
+   # Left panel
+   colors_left = ["#1c83eb", "#a4490e"]
+   sns.barplot(data=df_left, x="Category", y="Count", ax=ax0, palette=colors_left)
+   style_axes(ax0, "Building Counts on Different FIMs", "Inundation Surface", True)
+   for c in ax0.containers:
+       ax0.bar_label(
+           c, fmt="%.0f", label_type="edge", padding=3, fontsize=12, color="black"
+       )
+
+   # Right panel
+   colors_right = ["#ff5733", "#ffc300", "#28a745"]
+   sns.barplot(data=df_right, x="Category", y="Count", ax=ax1, palette=colors_right)
+   style_axes(ax1, "Contingency Flooded Building Counts", "Category", False)
+   for c in ax1.containers:
+       ax1.bar_label(
+           c, fmt="%.0f", label_type="edge", padding=3, fontsize=12, color="black"
+       )
+
+   # Combined legend
+   all_labels = ["Candidate", "Benchmark"] + third_raster_labels
+   all_colors = colors_left + colors_right
+   legend_handles = [
+       plt.Line2D(
+           [0],
+           [0],
+           marker="s",
+           color="w",
+           markerfacecolor=all_colors[i],
+           markersize=12,
+           label=f"{all_labels[i]} ({percentages[all_labels[i]]:.2f}%)",
+       )
+       for i in range(len(all_labels))
+   ]
+   ax_leg.legend(handles=legend_handles, fontsize=12, loc="center left", frameon=True)
    plot_dir = os.path.join(save_dir, "FinalPlots")
-   if not os.path.exists(plot_dir):
-       os.makedirs(plot_dir)
+   os.makedirs(plot_dir, exist_ok=True)
    output_path = os.path.join(plot_dir, f"BuildingCounts_{basename}.png")
-   fig.write_image(output_path, scale=500 / 96, engine="kaleido")
-   print(f"Performance metrics chart is saved as PNG at {output_path}")
-   fig.show()
+   fig_sb.savefig(output_path, dpi=400)
+   plt.close(fig_sb)
+
+   print(f"PNG were saved in : {output_path}")
 
 
 def process_TIFF(
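
The static PNG is now produced with seaborn/matplotlib instead of Plotly's kaleido export. Reduced to its essentials, the new pattern is a three-slot GridSpec (two bar panels plus a legend-only axis) annotated with ax.bar_label. A sketch under dummy counts and a hypothetical output filename:

import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Dummy data standing in for the building-count dictionaries built earlier.
df_left = pd.DataFrame({"Category": ["Candidate", "Benchmark"], "Count": [150, 140]})
df_right = pd.DataFrame(
    {"Category": ["False Positive", "False Negative", "True Positive"], "Count": [30, 20, 120]}
)

sns.set_theme(style="whitegrid")
fig = plt.figure(figsize=(10, 3), constrained_layout=True)
gs = gridspec.GridSpec(1, 3, figure=fig, width_ratios=[1, 1, 0.4])
ax0, ax1, ax_leg = (fig.add_subplot(gs[0, i]) for i in range(3))
ax_leg.axis("off")  # third slot carries only the shared legend

sns.barplot(data=df_left, x="Category", y="Count", ax=ax0)
sns.barplot(data=df_right, x="Category", y="Count", ax=ax1)
for ax in (ax0, ax1):
    for container in ax.containers:  # annotate each bar with its count
        ax.bar_label(container, fmt="%.0f", label_type="edge", padding=3)

# Proxy handles give one combined legend for both panels.
handles = [
    plt.Line2D([0], [0], marker="s", color="w", markerfacecolor=c, markersize=12, label=l)
    for c, l in [("#1c83eb", "Candidate"), ("#a4490e", "Benchmark")]
]
ax_leg.legend(handles=handles, loc="center left", frameon=True)

fig.savefig("BuildingCounts_example.png", dpi=400)  # hypothetical filename
plt.close(fig)
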

src/fimeval/ContingencyMap/evaluationFIM.py
@@ -116,7 +116,7 @@ def evaluateFIM(
        bounding_geom = AOI(benchmark_path, shapefile, save_dir)
 
    else:
-       print(f"--- {method.__name__} is processing ---")
+       print(f"**{method.__name__} is processing**")
        bounding_geom = method(smallest_raster_path, save_dir=save_dir)
 
    # Read and process benchmark raster
@@ -277,7 +277,8 @@ def evaluateFIM(
        out_transform1,
    )
    merged = out_image1 + out_image2_resized
-
+   merged[merged==7] = 5
+
    # Get Evaluation Metrics
    (
        unique_values,
@@ -406,7 +407,6 @@ def EvaluateFIM(
    gdf = gpd.read_file(PWB_dir)
 
    # Grant the permission to the main directory
-   print(f"Fixing permissions for {main_dir}...")
    fix_permissions(main_dir)
 
    # runt the process
@@ -425,12 +425,11 @@
        for tif_file in tif_files:
            if "benchmark" in tif_file.name.lower() or "BM" in tif_file.name:
                benchmark_path = tif_file
-               print(f"---Benchmark: {tif_file.name}---")
            else:
                candidate_path.append(tif_file)
 
        if benchmark_path and candidate_path:
-           print(f"---Flood Inundation Evaluation of {folder_dir.name}---")
+           print(f"**Flood Inundation Evaluation of {folder_dir.name}**")
            Metrics = evaluateFIM(
                benchmark_path,
                candidate_path,
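
Apart from the revised print statements, the functional change in evaluateFIM is the in-place reclassification of the merged contingency raster (merged[merged==7] = 5). The idiom is plain NumPy boolean-mask assignment; the array below uses arbitrary codes, since the meaning of classes 5 and 7 is internal to fimeval's contingency encoding:

import numpy as np

# Arbitrary stand-in for out_image1 + out_image2_resized (codes chosen only for illustration).
merged = np.array([[1, 7, 3],
                   [7, 5, 2]])
merged[merged == 7] = 5  # every cell equal to 7 is rewritten to 5, in place
print(merged)            # [[1 5 3]
                         #  [5 5 2]]
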

src/fimeval/ContingencyMap/plotevaluationmetrics.py
@@ -2,19 +2,23 @@ import os
 import glob
 import pandas as pd
 import plotly.express as px
+import seaborn as sns
+import matplotlib.pyplot as plt
 
 
 # Function to plot individual metric scores
 def PlotMetrics(csv_path, method_path):
     metrics_df = pd.read_csv(csv_path)
-    # Extract relevant metrics
+
+    # Keep only the desired metrics
     metrics = metrics_df.loc[
         metrics_df["Metrics"].isin(
             ["CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"]
         )
     ].copy()
 
-    metrics.loc[:, "Metrics"] = metrics["Metrics"].replace(
+    # Rename for presentation
+    metrics["Metrics"] = metrics["Metrics"].replace(
        {
            "CSI_values": "CSI",
            "POD_values": "POD",
@@ -23,54 +27,87 @@ def PlotMetrics(csv_path, method_path):
            "F1_values": "F1 Score",
        }
    )
+
    value_columns = metrics.select_dtypes(include="number").columns
 
+   # Output directory
+   plot_dir = os.path.join(method_path, "FinalPlots")
+   os.makedirs(plot_dir, exist_ok=True)
+
    for value_column in value_columns:
        metrics[value_column] = metrics[value_column].round(2)
 
-       # Create the bar plot
-       fig = px.bar(
+       # Showing with Plotly
+       fig_plotly = px.bar(
            metrics,
            x=value_column,
            y="Metrics",
-           title=f"Performance Metrics",
-           labels={value_column: "Score"},
            text=value_column,
            color="Metrics",
+           orientation="h",
            color_discrete_sequence=px.colors.qualitative.Set2,
+           title=f"Performance Metrics",
        )
-       fig.update_traces(texttemplate="%{text:.2f}", textposition="outside")
-       fig.update_layout(
-           yaxis_title="Metrics",
-           xaxis_title="Score",
+       fig_plotly.update_traces(texttemplate="%{text:.2f}", textposition="outside")
+       fig_plotly.update_layout(
            showlegend=False,
-           plot_bgcolor="rgba(0, 0, 0, 0)",
-           paper_bgcolor="rgba(0, 0, 0, 0)",
-           margin=dict(l=10, r=10, t=40, b=10),
-           xaxis=dict(showline=True, linewidth=2, linecolor="black"),
-           yaxis=dict(showline=True, linewidth=2, linecolor="black"),
+           plot_bgcolor="rgba(0,0,0,0)",
+           paper_bgcolor="rgba(0,0,0,0)",
            height=350,
            width=900,
+           xaxis=dict(showline=True, linewidth=2, linecolor="black", title="Score"),
+           yaxis=dict(showline=True, linewidth=2, linecolor="black"),
            title_font=dict(family="Arial", size=24, color="black"),
-           xaxis_title_font=dict(family="Arial", size=20, color="black"),
-           yaxis_title_font=dict(family="Arial", size=20, color="black"),
            font=dict(family="Arial", size=18, color="black"),
        )
+       fig_plotly.show()
 
-       # Save each plot as a PNG, using the column name as the filename
-       plot_dir = os.path.join(method_path, "FinalPlots")
-       if not os.path.exists(plot_dir):
-           os.makedirs(plot_dir)
+       # Save with Seaborn
+       sns.set_theme(style="whitegrid")
+       fig, ax = plt.subplots(figsize=(8, 3))
+       sns.barplot(
+           data=metrics,
+           x=value_column,
+           y="Metrics",
+           hue="Metrics",
+           palette="Set2",
+           ax=ax,
+           dodge=False,
+           legend=False,
+       )
 
-       output_filename = f"EvaluationMetrics_{value_column}.png"
-       output_path = os.path.join(plot_dir, output_filename)
+       # Annotate bars
+       for container in ax.containers:
+           ax.bar_label(container, fmt="%.2f", label_type="edge", fontsize=14)
 
-       # Save the plot as PNG
-       fig.write_image(output_path, engine="kaleido", scale=500 / 96)
-       print(
-           f"Performance metrics chart ({value_column}) saved as PNG at {output_path}"
+       # Styling
+       ax.set_title("Performance Metrics", fontsize=16)
+       ax.set_xlabel("Score", fontsize=16, color="black")  # just bigger, not bold
+       ax.set_ylabel("Metrics", fontsize=16, color="black")
+
+       ax.set_xticks([i / 10 for i in range(0, 11, 2)])
+       ax.set_xticklabels(
+           [f"{i/10:.1f}" for i in range(0, 11, 2)], fontsize=14, color="black"
        )
-       fig.show()
+
+       # Increase y-tick label font size
+       ax.tick_params(axis="y", labelsize=12, colors="black")
+       ax.tick_params(axis="x", labelsize=14, colors="black")
+
+       # Force spines black + thicker
+       ax.spines["left"].set_linewidth(1.5)
+       ax.spines["bottom"].set_linewidth(1.5)
+       ax.spines["left"].set_color("black")
+       ax.spines["bottom"].set_color("black")
+
+       sns.despine(right=True, top=True)
+
+       # Save to file
+       save_path = os.path.join(plot_dir, f"EvaluationMetrics_{value_column}.png")
+       plt.tight_layout()
+       fig.savefig(save_path, dpi=400)
+       plt.close(fig)
+       print(f"PNG saved at: {save_path}")
 
 
 def PlotEvaluationMetrics(main_dir, method_name, out_dir):
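
PlotMetrics now shows the Plotly figure interactively and writes the static PNG with seaborn. A hedged usage sketch, with a hypothetical CSV shaped the way the function expects ("Metrics" labels plus one numeric score column per candidate FIM; the column and path names here are invented):

import pandas as pd
from fimeval.ContingencyMap.plotevaluationmetrics import PlotMetrics

# Hypothetical metrics table matching the rows PlotMetrics filters on.
pd.DataFrame(
    {
        "Metrics": ["CSI_values", "POD_values", "Acc_values", "Prec_values", "F1_values"],
        "candidate_fim": [0.71, 0.86, 0.93, 0.80, 0.83],
    }
).to_csv("EvaluationMetrics_example.csv", index=False)

# Writes outputs/method_example/FinalPlots/EvaluationMetrics_candidate_fim.png
PlotMetrics("EvaluationMetrics_example.csv", "outputs/method_example")
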

src/fimeval/utilis.py
@@ -71,7 +71,6 @@ def reprojectFIMs(src_path, dst_path, target_crs):
                resampling=Resampling.nearest,
            )
    else:
-       print(f"Source raster is already in {target_crs}. No reprojection needed.")
        shutil.copy(src_path, dst_path)
    compress_tif_lzw(dst_path)
 
@@ -79,9 +78,7 @@ def reprojectFIMs(src_path, dst_path, target_crs):
 # Resample into the coarser resoution amoung all FIMS within the case
 def resample_to_resolution(src_path, x_resolution, y_resolution):
     src_path = Path(src_path)
-    print(src_path)
     temp_path = src_path.with_name(src_path.stem + "_resampled.tif")
-    print(temp_path)
 
     with rasterio.open(src_path) as src:
         transform = rasterio.transform.from_origin(
@@ -144,14 +141,13 @@ def MakeFIMsUniform(fim_dir, target_crs=None, target_resolution=None):
    if not final_crs:
        if all(is_within_conus(b, c) for b, c in zip(bounds_list, crs_list)):
            final_crs = "EPSG:5070"
-           print(f"Defaulting to CONUS CRS: {final_crs}")
+           print(f"Defaulting to CONUS CRS: {final_crs}, Reprojecting.")
        else:
            print(
                "Mixed or non-CONUS CRS detected. Please provide a valid target CRS."
            )
            return
 
-   print(f"Reprojecting all rasters to {final_crs}")
    for src_path in tif_files:
        dst_path = processing_folder / src_path.name
        reprojectFIMs(str(src_path), str(dst_path), final_crs)

src/fimeval.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fimeval
-Version: 0.1.54
+Version: 0.1.56
 Summary: A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation
 Author: Surface Dynamics Modeling Lab
 Author-email: Supath Dhital <sdhital@crimson.ua.edu>, Dipshika Devi <ddevi@ua.edu>
@@ -683,6 +683,7 @@ Requires-Dist: notebook<8.0.0,>=7.3.2
 Requires-Dist: boto3<2.0.0,>=1.36.16
 Requires-Dist: geemap
 Requires-Dist: uv
+Requires-Dist: seaborn
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: black; extra == "dev"

src/fimeval.egg-info/requires.txt
@@ -11,6 +11,7 @@ notebook<8.0.0,>=7.3.2
 boto3<2.0.0,>=1.36.16
 geemap
 uv
+seaborn
 
 [dev]
 pytest

tests/test_evaluationfim.py
@@ -29,7 +29,7 @@ countryISO = "USA"
 def test_evaluation_framework():
     # Run the evaluation
     # It has the Permanent Water Bodies (PWB) dataset as default for United States
-    fe.EvaluateFIM(Main_dir, method_name, output_dir)
+    # fe.EvaluateFIM(Main_dir, method_name, output_dir)
 
     # OR, If the Evaluation Study Area is outside the US or, user has their own PWB dataset
     # fe.EvaluateFIM(Main_dir, method_name, output_dir)