fimeval 0.1.44__tar.gz → 0.1.45__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in the public registry; it is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: fimeval
-Version: 0.1.44
+Version: 0.1.45
 Summary: A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation
 License: GPLv3
 Author: Surface Dynamics Modeling Lab
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "fimeval"
-version = "0.1.44"
+version = "0.1.45"
 description = "A Framework for Automatic Evaluation of Flood Inundation Mapping Predictions Evaluation"
 authors = [
 "Surface Dynamics Modeling Lab",
@@ -40,7 +40,6 @@ def evaluateFIM(
 Merged = []
 Unique = []
 FAR_values = []
-Dice_values = []

 # Dynamically call the specified method
 method = globals().get(method)
@@ -255,7 +254,6 @@ def evaluateFIM(
 FPR,
 merged,
 FAR,
-Dice,
 ) = evaluationmetrics(out_image1, out_image2_resized)

 # Append values to the lists
@@ -275,7 +273,6 @@ def evaluateFIM(
 Merged.append(merged)
 Unique.append(unique_values)
 FAR_values.append(FAR)
-Dice_values.append(Dice)

 results = {
 "CSI_values": csi_values,
@@ -294,7 +291,6 @@ def evaluateFIM(
 # 'Merged': Merged,
 # 'Unique': Unique
 "FAR_values": FAR_values,
-"Dice_values": Dice_values,
 }
 for candidate_idx, candidate_path in enumerate(candidate_paths):
 candidate_BASENAME = os.path.splitext(os.path.basename(candidate_path))[0]
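One point worth noting about the Dice removal above: for binary maps the Dice coefficient, 2*TP / (2*TP + FP + FN), is algebraically identical to the F1 score that evaluationmetrics still returns (up to the new epsilon smoothing), so the reported metrics lose no independent information. The diff itself states no rationale; the snippet below is only an editorial illustration of that identity, using made-up counts rather than code from the package.

import math

# Made-up confusion-matrix counts, purely for illustration
TP, FP, FN = 120, 30, 15

prec = TP / (TP + FP)
sen = TP / (TP + FN)
f1 = 2 * (prec * sen) / (prec + sen)      # F1 as computed in evaluationmetrics (without epsilon)
dice = 2 * TP / (2 * TP + FP + FN)        # Dice as computed in the removed module

assert math.isclose(f1, dice)             # both evaluate to ~0.8421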
@@ -0,0 +1,43 @@
+import numpy as np
+
+
+# Get all the evaluation metrics
+def evaluationmetrics(out_image1, out_image2):
+    merged = out_image1 + out_image2
+    unique_values, counts = np.unique(merged, return_counts=True)
+    class_pixel_counts = dict(zip(unique_values, counts))
+    class_pixel_counts
+    TN = class_pixel_counts.get(1,0)
+    FP = class_pixel_counts.get(2,0)
+    FN = class_pixel_counts.get(3,0)
+    TP = class_pixel_counts.get(4,0)
+    epsilon = 1e-8
+    TPR = TP / (TP + FN+epsilon)
+    FNR = FN / (TP + FN+epsilon)
+    Acc = (TP + TN) / (TP + TN + FP + FN+epsilon)
+    Prec = TP / (TP + FP+epsilon)
+    sen = TP / (TP + FN+epsilon)
+    F1_score = 2 * (Prec * sen) / (Prec + sen+epsilon)
+    CSI = TP / (TP + FN + FP+epsilon)
+    POD = TP / (TP + FN+epsilon)
+    FPR = FP / (FP + TN+epsilon)
+    FAR = FP / (TP + FP+epsilon)
+
+    return (
+        unique_values,
+        TN,
+        FP,
+        FN,
+        TP,
+        TPR,
+        FNR,
+        Acc,
+        Prec,
+        sen,
+        CSI,
+        F1_score,
+        POD,
+        FPR,
+        merged,
+        FAR,
+    )
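A minimal usage sketch for the evaluationmetrics module added above. Everything package-specific in it is an assumption rather than something the diff states: the import path is hypothetical, and the class coding (benchmark dry/wet as 1/3, candidate dry/wet as 0/1, so that the pixel-wise sum yields 1 = TN, 2 = FP, 3 = FN, 4 = TP) is only inferred from how merged is decoded. In the package itself, evaluateFIM passes the already prepared arrays out_image1 and out_image2_resized to this function.

import numpy as np

# Hypothetical import path -- adjust to wherever the new module lives in fimeval
from fimeval.evaluationmetrics import evaluationmetrics

# Toy rasters with the assumed coding: benchmark 1 = dry, 3 = wet;
# candidate 0 = dry, 1 = wet, so benchmark + candidate gives 1=TN, 2=FP, 3=FN, 4=TP
benchmark = np.array([[1, 1, 3],
                      [1, 3, 3],
                      [3, 3, 1]])
candidate = np.array([[0, 1, 1],
                      [0, 1, 0],
                      [1, 1, 0]])

(unique_values, TN, FP, FN, TP, TPR, FNR, Acc, Prec, sen,
 CSI, F1_score, POD, FPR, merged, FAR) = evaluationmetrics(benchmark, candidate)

print(f"TP={TP} FP={FP} FN={FN} TN={TN}")            # TP=4 FP=1 FN=1 TN=3
print(f"CSI={CSI:.3f} POD={POD:.3f} FAR={FAR:.3f}")  # CSI=0.667 POD=0.800 FAR=0.200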
@@ -1,44 +0,0 @@
-import numpy as np
-
-
-# Get all the evaluation metrics
-def evaluationmetrics(out_image1, out_image2):
-    merged = out_image1 + out_image2
-    unique_values, counts = np.unique(merged, return_counts=True)
-    class_pixel_counts = dict(zip(unique_values, counts))
-    class_pixel_counts
-    TN = class_pixel_counts[1]
-    FP = class_pixel_counts[2]
-    FN = class_pixel_counts[3]
-    TP = class_pixel_counts[4]
-    TPR = TP / (TP + FN)
-    FNR = FN / (TP + FN)
-    Acc = (TP + TN) / (TP + TN + FP + FN)
-    Prec = TP / (TP + FP)
-    sen = TP / (TP + FN)
-    F1_score = 2 * (Prec * sen) / (Prec + sen)
-    CSI = TP / (TP + FN + FP)
-    POD = TP / (TP + FN)
-    FPR = FP / (FP + TN)
-    FAR = FP / (TP + FP)
-    Dice = 2 * TP / (2 * TP + FP + FN)
-
-    return (
-        unique_values,
-        TN,
-        FP,
-        FN,
-        TP,
-        TPR,
-        FNR,
-        Acc,
-        Prec,
-        sen,
-        CSI,
-        F1_score,
-        POD,
-        FPR,
-        merged,
-        FAR,
-        Dice,
-    )
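Apart from dropping Dice, the substantive change between the removed module above and its replacement is robustness: class counts are now read with class_pixel_counts.get(k, 0) instead of direct indexing, and every denominator carries an epsilon of 1e-8. An input pair in which one of the four classes never occurs (for example, a candidate map with no false positives) would have raised a KeyError, or divided by zero, in the old version; the new version returns a zero count and finite metrics. A small illustration of just the lookup behaviour, under the same assumed class coding as the sketch above:

import numpy as np

# Benchmark and candidate agree at every pixel: no FP and no FN classes occur
benchmark = np.array([[1, 3],
                      [3, 3]])   # assumed coding: 1 = dry, 3 = wet
candidate = np.array([[0, 1],
                      [1, 1]])   # assumed coding: 0 = dry, 1 = wet

merged = benchmark + candidate   # contains only 1 (TN) and 4 (TP)
counts = dict(zip(*np.unique(merged, return_counts=True)))

# Old style: counts[2] would raise KeyError because class 2 (FP) is absent.
# New style: .get() falls back to 0, and the epsilon keeps divisions finite.
FP = counts.get(2, 0)
FN = counts.get(3, 0)
print(FP, FN)                    # 0 0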
3 files without changes