supervisely 6.73.238__py3-none-any.whl → 6.73.240__py3-none-any.whl

This diff represents the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.
Files changed (138)
  1. supervisely/annotation/annotation.py +2 -2
  2. supervisely/api/entity_annotation/tag_api.py +11 -4
  3. supervisely/api/file_api.py +17 -3
  4. supervisely/nn/__init__.py +1 -0
  5. supervisely/nn/benchmark/__init__.py +14 -2
  6. supervisely/nn/benchmark/base_benchmark.py +84 -37
  7. supervisely/nn/benchmark/base_evaluator.py +120 -0
  8. supervisely/nn/benchmark/base_visualizer.py +265 -0
  9. supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -5
  10. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +2 -2
  11. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +39 -16
  12. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +1 -1
  13. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +4 -4
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +12 -11
  15. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
  16. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +6 -6
  17. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +3 -3
  18. supervisely/nn/benchmark/{instance_segmentation_benchmark.py → instance_segmentation/benchmark.py} +9 -3
  19. supervisely/nn/benchmark/instance_segmentation/evaluator.py +58 -0
  20. supervisely/nn/benchmark/{visualization/text_templates/instance_segmentation_text.py → instance_segmentation/text_templates.py} +53 -69
  21. supervisely/nn/benchmark/instance_segmentation/visualizer.py +18 -0
  22. supervisely/nn/benchmark/object_detection/__init__.py +0 -0
  23. supervisely/nn/benchmark/object_detection/base_vis_metric.py +51 -0
  24. supervisely/nn/benchmark/{object_detection_benchmark.py → object_detection/benchmark.py} +4 -2
  25. supervisely/nn/benchmark/object_detection/evaluation_params.yaml +2 -0
  26. supervisely/nn/benchmark/{evaluation/object_detection_evaluator.py → object_detection/evaluator.py} +67 -9
  27. supervisely/nn/benchmark/{evaluation/coco → object_detection}/metric_provider.py +13 -14
  28. supervisely/nn/benchmark/{visualization/text_templates/object_detection_text.py → object_detection/text_templates.py} +49 -41
  29. supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py +48 -0
  30. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confidence_distribution.py +20 -24
  31. supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +119 -0
  32. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confusion_matrix.py +34 -22
  33. supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +129 -0
  34. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py +21 -26
  35. supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py +137 -0
  36. supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py +106 -0
  37. supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +136 -0
  38. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py +53 -49
  39. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +188 -0
  40. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +191 -0
  41. supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +116 -0
  42. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +106 -0
  43. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py +49 -0
  44. supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +72 -0
  45. supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +59 -0
  46. supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +71 -0
  47. supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +56 -0
  48. supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py +110 -0
  49. supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py +151 -0
  50. supervisely/nn/benchmark/object_detection/visualizer.py +697 -0
  51. supervisely/nn/benchmark/semantic_segmentation/__init__.py +9 -0
  52. supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py +55 -0
  53. supervisely/nn/benchmark/semantic_segmentation/benchmark.py +32 -0
  54. supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml +0 -0
  55. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +162 -0
  56. supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +153 -0
  57. supervisely/nn/benchmark/semantic_segmentation/text_templates.py +130 -0
  58. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/__init__.py +0 -0
  59. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/acknowledgement.py +15 -0
  60. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py +57 -0
  61. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +92 -0
  62. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py +84 -0
  63. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +101 -0
  64. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py +45 -0
  65. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +60 -0
  66. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py +107 -0
  67. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +112 -0
  68. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py +48 -0
  69. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py +178 -0
  70. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py +21 -0
  71. supervisely/nn/benchmark/semantic_segmentation/visualizer.py +304 -0
  72. supervisely/nn/benchmark/utils/__init__.py +12 -0
  73. supervisely/nn/benchmark/utils/detection/__init__.py +2 -0
  74. supervisely/nn/benchmark/{evaluation/coco → utils/detection}/calculate_metrics.py +6 -4
  75. supervisely/nn/benchmark/utils/detection/metric_provider.py +533 -0
  76. supervisely/nn/benchmark/{coco_utils → utils/detection}/sly2coco.py +4 -4
  77. supervisely/nn/benchmark/{coco_utils/utils.py → utils/detection/utlis.py} +11 -0
  78. supervisely/nn/benchmark/utils/semantic_segmentation/__init__.py +0 -0
  79. supervisely/nn/benchmark/utils/semantic_segmentation/calculate_metrics.py +35 -0
  80. supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +804 -0
  81. supervisely/nn/benchmark/utils/semantic_segmentation/loader.py +65 -0
  82. supervisely/nn/benchmark/utils/semantic_segmentation/utils.py +109 -0
  83. supervisely/nn/benchmark/visualization/evaluation_result.py +17 -3
  84. supervisely/nn/benchmark/visualization/vis_click_data.py +1 -1
  85. supervisely/nn/benchmark/visualization/widgets/__init__.py +3 -0
  86. supervisely/nn/benchmark/visualization/widgets/chart/chart.py +12 -4
  87. supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +35 -8
  88. supervisely/nn/benchmark/visualization/widgets/gallery/template.html +8 -4
  89. supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +1 -1
  90. supervisely/nn/benchmark/visualization/widgets/notification/notification.py +11 -7
  91. supervisely/nn/benchmark/visualization/widgets/radio_group/__init__.py +0 -0
  92. supervisely/nn/benchmark/visualization/widgets/radio_group/radio_group.py +34 -0
  93. supervisely/nn/benchmark/visualization/widgets/table/table.py +9 -3
  94. supervisely/nn/benchmark/visualization/widgets/widget.py +4 -0
  95. supervisely/project/project.py +18 -6
  96. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/METADATA +3 -1
  97. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/RECORD +104 -82
  98. supervisely/nn/benchmark/coco_utils/__init__.py +0 -2
  99. supervisely/nn/benchmark/evaluation/__init__.py +0 -3
  100. supervisely/nn/benchmark/evaluation/base_evaluator.py +0 -64
  101. supervisely/nn/benchmark/evaluation/coco/__init__.py +0 -2
  102. supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +0 -88
  103. supervisely/nn/benchmark/utils.py +0 -13
  104. supervisely/nn/benchmark/visualization/inference_speed/__init__.py +0 -19
  105. supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py +0 -161
  106. supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py +0 -28
  107. supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +0 -141
  108. supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py +0 -63
  109. supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py +0 -23
  110. supervisely/nn/benchmark/visualization/vis_metric_base.py +0 -337
  111. supervisely/nn/benchmark/visualization/vis_metrics/__init__.py +0 -67
  112. supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py +0 -55
  113. supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py +0 -93
  114. supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +0 -144
  115. supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py +0 -115
  116. supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py +0 -86
  117. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py +0 -119
  118. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py +0 -148
  119. supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py +0 -109
  120. supervisely/nn/benchmark/visualization/vis_metrics/overview.py +0 -189
  121. supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py +0 -57
  122. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py +0 -101
  123. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py +0 -46
  124. supervisely/nn/benchmark/visualization/vis_metrics/precision.py +0 -56
  125. supervisely/nn/benchmark/visualization/vis_metrics/recall.py +0 -54
  126. supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py +0 -57
  127. supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py +0 -88
  128. supervisely/nn/benchmark/visualization/vis_metrics/what_is.py +0 -23
  129. supervisely/nn/benchmark/visualization/vis_templates.py +0 -241
  130. supervisely/nn/benchmark/visualization/vis_widgets.py +0 -128
  131. supervisely/nn/benchmark/visualization/visualizer.py +0 -729
  132. /supervisely/nn/benchmark/{visualization/text_templates → instance_segmentation}/__init__.py +0 -0
  133. /supervisely/nn/benchmark/{evaluation/coco → instance_segmentation}/evaluation_params.yaml +0 -0
  134. /supervisely/nn/benchmark/{evaluation/coco → utils/detection}/metrics.py +0 -0
  135. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/LICENSE +0 -0
  136. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/WHEEL +0 -0
  137. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/entry_points.txt +0 -0
  138. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/top_level.txt +0 -0
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py
@@ -0,0 +1,92 @@
+ from typing import Dict
+
+ from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
+     SemanticSegmVisMetric,
+ )
+ from supervisely.nn.benchmark.visualization.widgets import ChartWidget, MarkdownWidget
+
+
+ class ConfusionMatrix(SemanticSegmVisMetric):
+
+     def __init__(self, *args, **kwargs) -> None:
+         super().__init__(*args, **kwargs)
+         self.clickable = True
+         self._keypair_sep = "-"
+
+     @property
+     def md(self) -> MarkdownWidget:
+         return MarkdownWidget(
+             "confusion_matrix",
+             "Confusion Matrix",
+             text=self.vis_texts.markdown_confusion_matrix,
+         )
+
+     @property
+     def chart(self) -> ChartWidget:
+         chart = ChartWidget("confusion_matrix", self.get_figure())
+         chart.set_click_data(
+             self.explore_modal_table.id,
+             self.get_click_data(),
+             chart_click_extra="'getKey': (payload) => `${payload.points[0].y}${'-'}${payload.points[0].x}`, 'keySeparator': '-',",
+         )
+         return chart
+
+     def get_figure(self):
+         import plotly.graph_objects as go  # pylint: disable=import-error
+
+         fig = go.Figure()
+
+         # Confusion Matrix figure
+         confusion_matrix, class_names = self.eval_result.mp.confusion_matrix
+
+         x = class_names
+         y = x[::-1].copy()
+         if len(x) >= 20:
+             text_anns = [[str(el) for el in row] for row in confusion_matrix]
+         else:
+             text_anns = [
+                 [
+                     f"Predicted: {pred}<br>Ground Truth: {gt}<br> Probability: {confusion_matrix[ig][ip]}"
+                     for ip, pred in enumerate(x)
+                 ]
+                 for ig, gt in enumerate(y)
+             ]
+
+         fig.add_trace(
+             go.Heatmap(
+                 z=confusion_matrix,
+                 x=x,
+                 y=y,
+                 colorscale="Viridis",
+                 showscale=False,
+                 text=text_anns,
+                 hoverinfo="text",
+             )
+         )
+
+         fig.update_layout(xaxis_title="Predicted", yaxis_title="Ground Truth")
+         if len(x) <= 20:
+             fig.update_layout(width=600, height=600)
+         return fig
+
+     def get_click_data(self) -> Dict:
+         res = dict(projectMeta=self.eval_result.pred_project_meta.to_json())
+         res["layoutTemplate"] = [None, None, None]
+         res["clickData"] = {}
+
+         _, class_names = self.eval_result.mp.confusion_matrix
+         for ig, gt_key in enumerate(class_names):
+             for ip, pred_key in enumerate(class_names):
+                 key = f"{gt_key}{self._keypair_sep}{pred_key}"
+                 res["clickData"][key] = {}
+                 res["clickData"][key]["imagesIds"] = []
+
+                 cmat_key = str(ig) + "_" + str(ip)
+                 for name in self.eval_result.mp.cmat_cell_img_names[cmat_key]:
+                     gt_img_id = self.eval_result.images_map[name]
+                     pred_img_id = self.eval_result.matched_pair_data[gt_img_id].pred_image_info.id
+                     res["clickData"][key]["imagesIds"].append(pred_img_id)
+                 title = f"Confusion Matrix. GT: '{gt_key}' ― Predicted: '{pred_key}'"
+                 res["clickData"][key]["title"] = title
+
+         return res
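For orientation, the widget's figure follows the standard Plotly heatmap pattern. Below is a minimal standalone sketch of that pattern with invented class names and counts (plain plotly, no Supervisely dependencies), not the widget itself:

import plotly.graph_objects as go

# Hypothetical 3-class confusion matrix: rows are ground truth, columns are predictions
class_names = ["car", "road", "sky"]
cm = [[50, 3, 1], [4, 80, 2], [0, 5, 90]]

fig = go.Figure(
    go.Heatmap(
        z=cm,
        x=class_names,  # predicted class per column
        y=class_names,  # ground-truth class per row
        colorscale="Viridis",
        showscale=False,
        text=[[str(v) for v in row] for row in cm],  # cell labels shown on hover
        hoverinfo="text",
    )
)
fig.update_layout(xaxis_title="Predicted", yaxis_title="Ground Truth", width=600, height=600)
fig.show()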
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py
@@ -0,0 +1,84 @@
+ from typing import Dict
+
+ from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
+     SemanticSegmVisMetric,
+ )
+ from supervisely.nn.benchmark.visualization.widgets import GalleryWidget, MarkdownWidget
+
+
+ class ExplorePredictions(SemanticSegmVisMetric):
+     MARKDOWN = "explore_predictions"
+     GALLERY = "explore_predictions"
+
+     def __init__(self, *args, **kwargs) -> None:
+         super().__init__(*args, **kwargs)
+         self.clickable = True
+
+     @property
+     def md(self) -> MarkdownWidget:
+         text = self.vis_texts.markdown_explorer
+         return MarkdownWidget(self.MARKDOWN, "Explore Predictions", text)
+
+     def gallery(self, opacity) -> GalleryWidget:
+         gallery = GalleryWidget(self.GALLERY, columns_number=3, opacity=opacity)
+         gallery.set_project_meta(self.eval_result.filtered_project_meta)
+         gallery.add_image_left_header("Compare with GT")
+
+         gallery.set_images(
+             image_infos=self.eval_result.sample_images,
+             ann_infos=self.eval_result.sample_anns,
+         )
+         gallery._gallery._update_filters()
+
+         # set click data for diff gallery
+         self.explore_modal_table.set_click_data(
+             self.diff_modal_table.id,
+             self.get_click_data(),
+             get_key="(payload) => `${payload.annotation.image_id || payload.annotation.imageId}`",
+         )
+
+         gallery.set_click_data(
+             self.diff_modal_table.id,
+             self.get_click_data(),
+             get_key="(payload) => `${payload.annotation.image_id || payload.annotation.imageId}`",
+         )
+
+         # set click data for explore gallery
+         gallery.set_show_all_data(
+             self.explore_modal_table.id,
+             self.get_all_data(),
+         )
+         return gallery
+
+     def get_all_data(self) -> dict:
+         res = {}
+
+         res["layoutTemplate"] = [None, None, None]
+         click_data = res.setdefault("clickData", {})
+         explore = click_data.setdefault("explore", {})
+         explore["title"] = "Explore all predictions"
+         images_ids = [d.pred_image_info.id for d in self.eval_result.matched_pair_data.values()]
+         explore["imagesIds"] = images_ids
+
+         return res
+
+     def get_click_data(self) -> Dict:
+         res = {}
+
+         res["layoutTemplate"] = [
+             {"columnTitle": "Original Image"},
+             {"columnTitle": "Ground Truth Masks"},
+             {"columnTitle": "Predicted Masks"},
+         ]
+
+         click_data = res.setdefault("clickData", {})
+
+         for pairs_data in self.eval_result.matched_pair_data.values():
+             gt = pairs_data.gt_image_info
+             pred = pairs_data.pred_image_info
+             diff = pairs_data.diff_image_info
+             assert gt.name == pred.name == diff.name
+             key = click_data.setdefault(str(pred.id), {})
+             key["imagesIds"] = [diff.id, gt.id, pred.id]
+             key["title"] = f"Image: {pred.name}"
+         return res
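The click payload built by get_click_data above is a plain JSON-like dict keyed by predicted-image id. A minimal sketch with hypothetical ids and names (field names taken from the code above, the data is invented) shows the shape the gallery widget receives:

from types import SimpleNamespace

# Hypothetical matched GT / prediction / diff image triple
pairs = [
    SimpleNamespace(
        gt_image_info=SimpleNamespace(id=101, name="img_0001.png"),
        pred_image_info=SimpleNamespace(id=201, name="img_0001.png"),
        diff_image_info=SimpleNamespace(id=301, name="img_0001.png"),
    ),
]

res = {
    "layoutTemplate": [
        {"columnTitle": "Original Image"},
        {"columnTitle": "Ground Truth Masks"},
        {"columnTitle": "Predicted Masks"},
    ],
    "clickData": {},
}
for p in pairs:
    # one entry per prediction: diff, GT, and predicted image ids in column order
    res["clickData"][str(p.pred_image_info.id)] = {
        "imagesIds": [p.diff_image_info.id, p.gt_image_info.id, p.pred_image_info.id],
        "title": f"Image: {p.pred_image_info.name}",
    }

print(res["clickData"]["201"])
# {'imagesIds': [301, 101, 201], 'title': 'Image: img_0001.png'}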
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py
@@ -0,0 +1,101 @@
+ from typing import Dict
+
+ from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
+     SemanticSegmVisMetric,
+ )
+ from supervisely.nn.benchmark.visualization.widgets import ChartWidget, MarkdownWidget
+
+
+ class FrequentlyConfused(SemanticSegmVisMetric):
+
+     def __init__(self, *args, **kwargs) -> None:
+         super().__init__(*args, **kwargs)
+         self.clickable = True
+         self._keypair_sep = "-"
+
+     @property
+     def md(self) -> MarkdownWidget:
+         if self.is_empty:
+             text = self.vis_texts.markdown_frequently_confused_empty
+         else:
+             text = self.vis_texts.markdown_frequently_confused
+         return MarkdownWidget("frequently_confused", "Frequently Confused Classes", text=text)
+
+     @property
+     def chart(self) -> ChartWidget:
+         chart = ChartWidget("frequently_confused", self.get_figure())
+         chart.set_click_data(
+             self.explore_modal_table.id,
+             self.get_click_data(),
+             chart_click_extra="'getKey': (payload) => `${payload.points[0].x}`, 'keySeparator': '-',",
+         )
+         return chart
+
+     @property
+     def is_empty(self) -> bool:
+         probs, indexes_2d = self.eval_result.mp.frequently_confused
+         return len(probs) == 0
+
+     def get_figure(self):
+         import plotly.graph_objects as go  # pylint: disable=import-error
+
+         fig = go.Figure()
+
+         # Frequency of Confused Classes figure
+         probs, indexes_2d = self.eval_result.mp.frequently_confused
+         confused_classes = []
+         for idx in indexes_2d:
+             gt_idx, pred_idx = idx[0], idx[1]
+             gt_class = self.eval_result.mp.eval_data.index[gt_idx]
+             pred_class = self.eval_result.mp.eval_data.index[pred_idx]
+             confused_classes.append(f"{gt_class}-{pred_class}")
+
+         fig = go.Figure()
+         fig.add_trace(
+             go.Bar(
+                 x=confused_classes,
+                 y=probs,
+                 orientation="v",
+                 text=probs,
+                 marker=dict(color=probs, colorscale="Reds"),
+             )
+         )
+         fig.update_traces(hovertemplate="Class Pair: %{x}<br>Probability: %{y:.2f}<extra></extra>")
+         fig.update_layout(
+             xaxis_title="Class Pair",
+             yaxis_title="Probability",
+             yaxis_range=[0, max(probs) + 0.1],
+             yaxis=dict(showticklabels=False),
+             font=dict(size=24),
+             width=1000 if len(confused_classes) > 10 else 600,
+         )
+
+         return fig
+
+     def get_click_data(self) -> Dict:
+         if self.is_empty:
+             return
+         res = dict(projectMeta=self.eval_result.pred_project_meta.to_json())
+
+         res["layoutTemplate"] = [None, None, None]
+         res["clickData"] = {}
+
+         _, class_names = self.eval_result.mp.confusion_matrix
+         _, indexes_2d = self.eval_result.mp.frequently_confused
+         for idx in indexes_2d:
+             gt_idx, pred_idx = idx[0], idx[1]
+             gt_key = class_names[gt_idx]
+             pred_key = class_names[pred_idx]
+             key = f"{gt_key}{self._keypair_sep}{pred_key}"
+
+             res["clickData"][key] = {}
+             res["clickData"][key]["imagesIds"] = []
+             idx_key = str(gt_idx) + "_" + str(pred_idx)
+             for name in self.eval_result.mp.cmat_cell_img_names[idx_key]:
+                 gt_img_id = self.eval_result.images_map[name]
+                 pred_img_id = self.eval_result.matched_pair_data[gt_img_id].pred_image_info.id
+                 res["clickData"][key]["imagesIds"].append(pred_img_id)
+
+             title = f"Confused classes. GT: '{gt_key}' ― Predicted: '{pred_key}'"
+             res["clickData"][key]["title"] = title
+         return res
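The bar chart above maps each GT-prediction pair to a confusion probability and colors the bars by that same value. A standalone sketch of the pattern with invented pairs and probabilities (plain plotly, no Supervisely imports):

import plotly.graph_objects as go

# Hypothetical confusion probabilities for GT-prediction class pairs
confused_classes = ["road-sidewalk", "car-truck", "sky-building"]
probs = [0.42, 0.27, 0.11]

fig = go.Figure(
    go.Bar(
        x=confused_classes,
        y=probs,
        text=probs,  # print the probability on each bar
        marker=dict(color=probs, colorscale="Reds"),  # darker red = more confusion
    )
)
fig.update_traces(hovertemplate="Class Pair: %{x}<br>Probability: %{y:.2f}<extra></extra>")
fig.update_layout(
    xaxis_title="Class Pair",
    yaxis_title="Probability",
    yaxis_range=[0, max(probs) + 0.1],  # headroom so text labels stay inside the plot
)
fig.show()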
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py
@@ -0,0 +1,45 @@
+ from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
+     SemanticSegmVisMetric,
+ )
+ from supervisely.nn.benchmark.visualization.widgets import ChartWidget, MarkdownWidget
+
+
+ class IntersectionErrorOverUnion(SemanticSegmVisMetric):
+
+     @property
+     def md(self) -> MarkdownWidget:
+         return MarkdownWidget(
+             "intersection_error_over_union",
+             "Intersection & Error Over Union",
+             text=self.vis_texts.markdown_iou,
+         )
+
+     @property
+     def chart(self) -> ChartWidget:
+         return ChartWidget("intersection_error_over_union", self.get_figure())
+
+     def get_figure(self):
+         import plotly.graph_objects as go  # pylint: disable=import-error
+
+         fig = go.Figure()
+
+         # Intersection & Error Over Union figure
+         labels = ["mIoU", "mBoundaryEoU", "mExtentEoU", "mSegmentEoU"]
+         values = [
+             self.eval_result.mp.iou,
+             self.eval_result.mp.boundary_eou,
+             self.eval_result.mp.extent_eou,
+             self.eval_result.mp.segment_eou,
+         ]
+         fig.add_trace(
+             go.Pie(
+                 labels=labels,
+                 values=values,
+                 hole=0.5,
+                 textposition="outside",
+                 textinfo="percent+label",
+                 marker=dict(colors=["#8ACAA1", "#FFE4B5", "#F7ADAA", "#dd3f3f"]),
+             )
+         )
+
+         return fig
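The donut above shows mIoU alongside the three error-over-union components; go.Pie normalizes the four values, so each slice is displayed as a percentage of their sum. A standalone sketch with invented values:

import plotly.graph_objects as go

# Hypothetical decomposition: mIoU plus boundary / extent / segment error shares
labels = ["mIoU", "mBoundaryEoU", "mExtentEoU", "mSegmentEoU"]
values = [0.71, 0.12, 0.09, 0.08]

fig = go.Figure(
    go.Pie(
        labels=labels,
        values=values,
        hole=0.5,  # donut style
        textposition="outside",
        textinfo="percent+label",
        marker=dict(colors=["#8ACAA1", "#FFE4B5", "#F7ADAA", "#dd3f3f"]),
    )
)
fig.show()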
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py
@@ -0,0 +1,60 @@
+ from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
+     SemanticSegmVisMetric,
+ )
+ from supervisely.nn.benchmark.visualization.widgets import ChartWidget, MarkdownWidget
+
+
+ class KeyMetrics(SemanticSegmVisMetric):
+
+     @property
+     def md(self) -> MarkdownWidget:
+         return MarkdownWidget(
+             "markdown_header",
+             "Key Metrics",
+             text=self.vis_texts.markdown_key_metrics,
+         )
+
+     @property
+     def chart(self) -> ChartWidget:
+         return ChartWidget("base_metrics_chart", self.get_figure())
+
+     def get_figure(self):
+         import plotly.graph_objects as go  # pylint: disable=import-error
+
+         fig = go.Figure()
+         metrics = self.eval_result.mp.key_metrics().copy()
+         metrics["mPixel accuracy"] = round(metrics["mPixel accuracy"] * 100, 2)
+         fig.add_trace(
+             go.Scatterpolar(
+                 r=list(metrics.values()) + [list(metrics.values())[0]],
+                 theta=list(metrics.keys()) + [list(metrics.keys())[0]],
+                 # fill="toself",
+                 hovertemplate="%{theta}: %{r:.2f}<extra></extra>",
+             )
+         )
+
+         fig.update_layout(
+             showlegend=False,
+             polar=dict(
+                 radialaxis=dict(
+                     range=[0, 100],
+                     ticks="outside",
+                 ),
+                 angularaxis=dict(rotation=90, direction="clockwise"),
+             ),
+             dragmode=False,
+             margin=dict(l=25, r=25, t=25, b=25),
+             modebar=dict(
+                 remove=[
+                     "zoom2d",
+                     "pan2d",
+                     "select2d",
+                     "lasso2d",
+                     "zoomIn2d",
+                     "zoomOut2d",
+                     "autoScale2d",
+                     "resetScale2d",
+                 ]
+             ),
+         )
+         return fig
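Note the trick in the Scatterpolar trace above: the first value is appended back onto the end of r and theta because go.Scatterpolar does not automatically connect the last axis back to the first, so the radar polygon would otherwise stay open. A standalone sketch with invented metric values on a 0-100 scale:

import plotly.graph_objects as go

# Hypothetical key metrics (percent scale)
metrics = {"mIoU": 71.0, "mBoundaryIoU": 58.5, "mPixel accuracy": 93.2, "mPrecision": 80.1}

r = list(metrics.values())
theta = list(metrics.keys())
fig = go.Figure(
    go.Scatterpolar(
        r=r + r[:1],              # repeat the first point to close the polygon
        theta=theta + theta[:1],
        hovertemplate="%{theta}: %{r:.2f}<extra></extra>",
    )
)
fig.update_layout(
    showlegend=False,
    polar=dict(
        radialaxis=dict(range=[0, 100], ticks="outside"),
        angularaxis=dict(rotation=90, direction="clockwise"),
    ),
)
fig.show()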
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py
@@ -0,0 +1,107 @@
+ from __future__ import annotations
+
+ from typing import Dict
+
+ from supervisely.api.image_api import ImageApi, ImageInfo
+ from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
+     SemanticSegmVisMetric,
+ )
+ from supervisely.nn.benchmark.visualization.widgets import MarkdownWidget, TableWidget
+
+
+ class ModelPredictions(SemanticSegmVisMetric):
+     MARKDOWN = "model_predictions"
+     TABLE = "model_predictions"
+
+     def __init__(self, *args, **kwargs) -> None:
+         super().__init__(*args, **kwargs)
+         self.clickable = True
+         self._row_ids = None  # TODO: check if this is used
+
+     @property
+     def md(self) -> MarkdownWidget:
+         text = self.vis_texts.markdown_predictions_table
+         return MarkdownWidget(self.MARKDOWN, "Prediction details for every image", text)
+
+     @property
+     def table(self) -> TableWidget:
+         df = self.eval_result.mp.metric_table().round(2)
+
+         # tmp = set([d.pred_image_info.name for d in self.eval_result.matched_pair_data.values()])
+         # df = df[df["Image name"].isin(tmp)]
+         columns_options = [
+             {"maxWidth": "225px"},
+             {"maxValue": 1, "tooltip": "Pixel accuracy"},
+             {"maxValue": 1, "tooltip": "Precision (positive predictive value)"},
+             {"maxValue": 1, "tooltip": "Recall (sensitivity)"},
+             {"maxValue": 1, "tooltip": "F1 score (harmonic mean of precision and recall)"},
+             {"maxValue": 1, "tooltip": "IoU (Intersection over Union)"},
+             {"maxValue": 1, "tooltip": "Boundary IoU"},
+             {"maxValue": 1, "tooltip": "Boundary EoU"},
+             {"maxValue": 1, "tooltip": "Extent EoU"},
+             {"maxValue": 1, "tooltip": "Segment EoU"},
+             {"maxValue": 1, "tooltip": "Boundary EoU renormed"},
+             {"maxValue": 1, "tooltip": "Extent EoU renormed"},
+             {"maxValue": 1, "tooltip": "Segment EoU renormed"},
+         ]
+
+         # columns = df.columns.tolist()[1:] # exclude sly_id
+         columns = df.columns.tolist()
+         content = []
+
+         # key_mapping = {}
+         # for old, new in zip(ImageInfo._fields, ImageApi.info_sequence()):
+         #     key_mapping[old] = new
+
+         self._row_ids = []
+         df = df.replace({float("nan"): None})  # replace NaN / float("nan") with None
+
+         for row in df.values.tolist():
+             # sly_id = row.pop(0)
+             # info = self.eval_result.matched_pair_data[sly_id].gt_image_info
+             name = row[0]
+
+             dct = {
+                 "row": row,
+                 "id": name,
+                 "items": row,
+             }
+
+             self._row_ids.append(dct["id"])
+             content.append(dct)
+
+         data = {
+             "columns": columns,
+             "columnsOptions": columns_options,
+             "content": content,
+         }
+         table = TableWidget(
+             name=self.TABLE,
+             data=data,
+             fix_columns=1,
+         )
+         table.set_click_data(
+             self.explore_modal_table.id,
+             self.get_click_data(),
+         )
+         return table
+
+     def get_click_data(self) -> Dict:
+         res = {}
+         res["layoutTemplate"] = [
+             {"columnTitle": "Original Image"},
+             {"columnTitle": "Ground Truth Masks"},
+             {"columnTitle": "Predicted Masks"},
+         ]
+         click_data = res.setdefault("clickData", {})
+
+         for pairs_data in self.eval_result.matched_pair_data.values():
+             gt = pairs_data.gt_image_info
+             pred = pairs_data.pred_image_info
+             diff = pairs_data.diff_image_info
+             assert gt.name == pred.name == diff.name
+             key = click_data.setdefault(str(pred.name), {})
+             key["imagesIds"] = [diff.id, gt.id, pred.id]
+             key["title"] = f"Image: {pred.name}"
+
+         return res
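The table build above replaces NaN with None before serialization, since NaN is not valid JSON while None becomes null. A minimal sketch of that row-to-content transformation with a dummy DataFrame (the column names are invented for illustration):

import pandas as pd

# Dummy per-image metric table
df = pd.DataFrame(
    {"Image name": ["img_0001.png", "img_0002.png"], "IoU": [0.83, float("nan")]}
).round(2)
df = df.replace({float("nan"): None})  # NaN -> None so JSON emits null

content = []
for row in df.values.tolist():
    name = row[0]  # the image name doubles as the row id, as in the widget above
    content.append({"row": row, "id": name, "items": row})

data = {"columns": df.columns.tolist(), "content": content}
print(data["content"][1])
# {'row': ['img_0002.png', None], 'id': 'img_0002.png', 'items': ['img_0002.png', None]}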
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py
@@ -0,0 +1,112 @@
+ import datetime
+ from typing import List
+
+ from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
+     SemanticSegmVisMetric,
+ )
+ from supervisely.nn.benchmark.visualization.widgets import MarkdownWidget
+
+
+ class Overview(SemanticSegmVisMetric):
+
+     def get_header(self, user_login: str) -> MarkdownWidget:
+         current_date = datetime.datetime.now().strftime("%d %B %Y, %H:%M")
+         header_text = self.vis_texts.markdown_header.format(
+             self.eval_result.name, user_login, current_date
+         )
+         header = MarkdownWidget("markdown_header", "Header", text=header_text)
+         return header
+
+     @property
+     def overview_md(self) -> List[MarkdownWidget]:
+         url = self.eval_result.inference_info.get("checkpoint_url")
+         link_text = self.eval_result.inference_info.get("custom_checkpoint_path")
+         if link_text is None:
+             link_text = url
+         link_text = link_text.replace("_", "\_")
+
+         model_name = self.eval_result.inference_info.get("model_name") or "Custom"
+         checkpoint_name = self.eval_result.inference_info.get("deploy_params", {}).get(
+             "checkpoint_name", ""
+         )
+
+         # Note about validation dataset
+         classes_str, note_about_images, starter_app_info = self._get_overview_info()
+
+         formats = [
+             model_name.replace("_", "\_"),
+             checkpoint_name.replace("_", "\_"),
+             self.eval_result.inference_info.get("architecture"),
+             self.eval_result.inference_info.get("task_type"),
+             self.eval_result.inference_info.get("runtime"),
+             url,
+             link_text,
+             self.eval_result.gt_project_info.id,
+             self.eval_result.gt_project_info.name,
+             classes_str,
+             note_about_images,
+             starter_app_info,
+             self.vis_texts.docs_url,
+         ]
+
+         md = MarkdownWidget(
+             "markdown_overview",
+             "Overview",
+             text=self.vis_texts.markdown_overview.format(*formats),
+         )
+         md.is_info_block = True
+         md.width_fit_content = True
+         return md
+
+     def _get_overview_info(self):
+         classes_cnt = len(self.eval_result.classes_whitelist)
+         classes_str = "classes" if classes_cnt > 1 else "class"
+         classes_str = f"{classes_cnt} {classes_str}"
+
+         evaluator_session, train_session, images_str = None, None, ""
+         gt_project_id = self.eval_result.gt_project_info.id
+         gt_dataset_ids = self.eval_result.gt_dataset_ids
+         gt_images_cnt = self.eval_result.val_images_cnt
+         train_info = self.eval_result.train_info
+         evaluator_app_info = self.eval_result.evaluator_app_info
+         total_imgs_cnt = self.eval_result.gt_project_info.items_count
+         if gt_images_cnt is not None:
+             val_imgs_cnt = gt_images_cnt
+         elif gt_dataset_ids is not None:
+             datasets = self.eval_result.gt_dataset_infos
+             val_imgs_cnt = sum(ds.items_count for ds in datasets)
+         else:
+             val_imgs_cnt = total_imgs_cnt
+
+         if train_info:
+             train_task_id = train_info.get("app_session_id")
+             if train_task_id:
+                 app_id = self.eval_result.task_info["meta"]["app"]["id"]
+                 train_session = f'- **Training dashboard**: <a href="/apps/{app_id}/sessions/{train_task_id}" target="_blank">open</a>'
+
+             train_imgs_cnt = train_info.get("images_count")
+             images_str = f", {train_imgs_cnt} images in train, {val_imgs_cnt} images in validation"
+
+         if gt_images_cnt is not None:
+             images_str += (
+                 f", total {total_imgs_cnt} images. Evaluated using subset - {val_imgs_cnt} images"
+             )
+         elif gt_dataset_ids is not None:
+             links = [
+                 f'<a href="/projects/{gt_project_id}/datasets/{ds.id}" target="_blank">{ds.name}</a>'
+                 for ds in datasets
+             ]
+             images_str += f", total {total_imgs_cnt} images. Evaluated on the dataset{'s' if len(links) > 1 else ''}: {', '.join(links)}"
+         else:
+             images_str += f", total {total_imgs_cnt} images. Evaluated on the whole project ({val_imgs_cnt} images)"
+
+         if evaluator_app_info:
+             evaluator_task_id = evaluator_app_info.get("id")
+             evaluator_app_id = evaluator_app_info.get("meta", {}).get("app", {}).get("id")
+             evaluator_app_name = evaluator_app_info.get("meta", {}).get("app", {}).get("name")
+             if evaluator_task_id and evaluator_app_id and evaluator_app_name:
+                 evaluator_session = f'- **Evaluator app session**: <a href="/apps/{evaluator_app_id}/sessions/{evaluator_task_id}" target="_blank">open</a>'
+
+         starter_app_info = train_session or evaluator_session or ""
+
+         return classes_str, images_str, starter_app_info
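_get_overview_info above resolves the validation image count with a three-step fallback: an explicit val_images_cnt wins, else the item counts of the selected GT datasets are summed, else the whole project is used. A condensed sketch of that precedence (names shortened, data invented):

from typing import List, Optional

def resolve_val_images_cnt(
    val_images_cnt: Optional[int],
    dataset_item_counts: Optional[List[int]],
    project_items_count: int,
) -> int:
    # 1) explicit subset size wins; 2) else sum the chosen datasets; 3) else whole project
    if val_images_cnt is not None:
        return val_images_cnt
    if dataset_item_counts is not None:
        return sum(dataset_item_counts)
    return project_items_count

assert resolve_val_images_cnt(50, [100, 200], 500) == 50
assert resolve_val_images_cnt(None, [100, 200], 500) == 300
assert resolve_val_images_cnt(None, None, 500) == 500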
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py
@@ -0,0 +1,48 @@
+ from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
+     SemanticSegmVisMetric,
+ )
+ from supervisely.nn.benchmark.visualization.widgets import ChartWidget, MarkdownWidget
+
+
+ class RenormalizedErrorOverUnion(SemanticSegmVisMetric):
+
+     @property
+     def md(self) -> MarkdownWidget:
+         return MarkdownWidget(
+             "renormalized_error_over_union",
+             "Renormalized Error over Union",
+             text=self.vis_texts.markdown_renormalized_error_ou,
+         )
+
+     @property
+     def chart(self) -> ChartWidget:
+         return ChartWidget("intersection_error_over_union", self.get_figure())
+
+     def get_figure(self):
+         import plotly.graph_objects as go  # pylint: disable=import-error
+
+         fig = go.Figure()
+
+         # Renormalized Error over Union figure
+         labels = ["Boundary EoU", "Extent EoU", "Segment EoU"]
+         values = [
+             self.eval_result.mp.boundary_renormed_eou,
+             self.eval_result.mp.extent_renormed_eou,
+             self.eval_result.mp.segment_renormed_eou,
+         ]
+         fig.add_trace(
+             go.Bar(
+                 x=labels,
+                 y=values,
+                 orientation="v",
+                 text=values,
+                 width=[0.5, 0.5, 0.5],
+                 textposition="outside",
+                 marker_color=["#FFE4B5", "#F7ADAA", "#dd3f3f"],
+                 hovertemplate="%{x}: %{y:.2f}<extra></extra>",
+             )
+         )
+         fig.update_traces(hovertemplate="%{x}: %{y:.2f}<extra></extra>")
+         fig.update_layout(width=600)
+
+         return fig