supervisely-6.73.237-py3-none-any.whl → supervisely-6.73.239-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/annotation/annotation.py +2 -2
- supervisely/api/entity_annotation/tag_api.py +11 -4
- supervisely/geometry/rectangle.py +7 -8
- supervisely/nn/__init__.py +1 -0
- supervisely/nn/benchmark/__init__.py +14 -2
- supervisely/nn/benchmark/base_benchmark.py +84 -37
- supervisely/nn/benchmark/base_evaluator.py +120 -0
- supervisely/nn/benchmark/base_visualizer.py +265 -0
- supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -5
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +2 -2
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +39 -16
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +4 -4
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +12 -11
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +6 -6
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +3 -3
- supervisely/nn/benchmark/{instance_segmentation_benchmark.py → instance_segmentation/benchmark.py} +9 -3
- supervisely/nn/benchmark/instance_segmentation/evaluator.py +58 -0
- supervisely/nn/benchmark/{visualization/text_templates/instance_segmentation_text.py → instance_segmentation/text_templates.py} +53 -69
- supervisely/nn/benchmark/instance_segmentation/visualizer.py +18 -0
- supervisely/nn/benchmark/object_detection/__init__.py +0 -0
- supervisely/nn/benchmark/object_detection/base_vis_metric.py +51 -0
- supervisely/nn/benchmark/{object_detection_benchmark.py → object_detection/benchmark.py} +4 -2
- supervisely/nn/benchmark/object_detection/evaluation_params.yaml +2 -0
- supervisely/nn/benchmark/{evaluation/object_detection_evaluator.py → object_detection/evaluator.py} +67 -9
- supervisely/nn/benchmark/{evaluation/coco → object_detection}/metric_provider.py +13 -14
- supervisely/nn/benchmark/{visualization/text_templates/object_detection_text.py → object_detection/text_templates.py} +49 -41
- supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py +48 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confidence_distribution.py +20 -24
- supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +119 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confusion_matrix.py +34 -22
- supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +129 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py +21 -26
- supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py +137 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py +106 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +136 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py +53 -49
- supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +188 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +191 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +116 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +106 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py +49 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +72 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +59 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +71 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +56 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py +110 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py +151 -0
- supervisely/nn/benchmark/object_detection/visualizer.py +697 -0
- supervisely/nn/benchmark/semantic_segmentation/__init__.py +9 -0
- supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py +55 -0
- supervisely/nn/benchmark/semantic_segmentation/benchmark.py +32 -0
- supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml +0 -0
- supervisely/nn/benchmark/semantic_segmentation/evaluator.py +162 -0
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +153 -0
- supervisely/nn/benchmark/semantic_segmentation/text_templates.py +130 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/__init__.py +0 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/acknowledgement.py +15 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py +57 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +92 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py +84 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +101 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py +45 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +60 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py +107 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +112 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py +48 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py +178 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py +21 -0
- supervisely/nn/benchmark/semantic_segmentation/visualizer.py +304 -0
- supervisely/nn/benchmark/utils/__init__.py +12 -0
- supervisely/nn/benchmark/utils/detection/__init__.py +2 -0
- supervisely/nn/benchmark/{evaluation/coco → utils/detection}/calculate_metrics.py +6 -4
- supervisely/nn/benchmark/utils/detection/metric_provider.py +533 -0
- supervisely/nn/benchmark/{coco_utils → utils/detection}/sly2coco.py +4 -4
- supervisely/nn/benchmark/{coco_utils/utils.py → utils/detection/utlis.py} +11 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/__init__.py +0 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/calculate_metrics.py +35 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +804 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/loader.py +65 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/utils.py +109 -0
- supervisely/nn/benchmark/visualization/evaluation_result.py +17 -3
- supervisely/nn/benchmark/visualization/vis_click_data.py +1 -1
- supervisely/nn/benchmark/visualization/widgets/__init__.py +3 -0
- supervisely/nn/benchmark/visualization/widgets/chart/chart.py +12 -4
- supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +35 -8
- supervisely/nn/benchmark/visualization/widgets/gallery/template.html +8 -4
- supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +1 -1
- supervisely/nn/benchmark/visualization/widgets/notification/notification.py +11 -7
- supervisely/nn/benchmark/visualization/widgets/radio_group/__init__.py +0 -0
- supervisely/nn/benchmark/visualization/widgets/radio_group/radio_group.py +34 -0
- supervisely/nn/benchmark/visualization/widgets/table/table.py +9 -3
- supervisely/nn/benchmark/visualization/widgets/widget.py +4 -0
- supervisely/project/project.py +18 -6
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/METADATA +3 -1
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/RECORD +104 -82
- supervisely/nn/benchmark/coco_utils/__init__.py +0 -2
- supervisely/nn/benchmark/evaluation/__init__.py +0 -3
- supervisely/nn/benchmark/evaluation/base_evaluator.py +0 -64
- supervisely/nn/benchmark/evaluation/coco/__init__.py +0 -2
- supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +0 -88
- supervisely/nn/benchmark/utils.py +0 -13
- supervisely/nn/benchmark/visualization/inference_speed/__init__.py +0 -19
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py +0 -161
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py +0 -28
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +0 -141
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py +0 -63
- supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py +0 -23
- supervisely/nn/benchmark/visualization/vis_metric_base.py +0 -337
- supervisely/nn/benchmark/visualization/vis_metrics/__init__.py +0 -67
- supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py +0 -55
- supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py +0 -93
- supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +0 -144
- supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py +0 -115
- supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py +0 -86
- supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py +0 -119
- supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py +0 -148
- supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py +0 -109
- supervisely/nn/benchmark/visualization/vis_metrics/overview.py +0 -189
- supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py +0 -57
- supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py +0 -101
- supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py +0 -46
- supervisely/nn/benchmark/visualization/vis_metrics/precision.py +0 -56
- supervisely/nn/benchmark/visualization/vis_metrics/recall.py +0 -54
- supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py +0 -57
- supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py +0 -88
- supervisely/nn/benchmark/visualization/vis_metrics/what_is.py +0 -23
- supervisely/nn/benchmark/visualization/vis_templates.py +0 -241
- supervisely/nn/benchmark/visualization/vis_widgets.py +0 -128
- supervisely/nn/benchmark/visualization/visualizer.py +0 -729
- /supervisely/nn/benchmark/{visualization/text_templates → instance_segmentation}/__init__.py +0 -0
- /supervisely/nn/benchmark/{evaluation/coco → instance_segmentation}/evaluation_params.yaml +0 -0
- /supervisely/nn/benchmark/{evaluation/coco → utils/detection}/metrics.py +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/LICENSE +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/WHEEL +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/top_level.txt +0 -0
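The bulk of this release is a restructuring of `supervisely/nn/benchmark`: the flat `evaluation/`, `coco_utils/`, and `visualization/vis_metrics/` modules are split into per-task packages (`object_detection/`, `instance_segmentation/`, `semantic_segmentation/`) plus a shared `utils/` package, and the old monolithic `visualization/visualizer.py` is replaced by per-task visualizers with widget-based vis metrics. A rough sketch of how import paths move; the class names below are assumptions inferred from the file renames, not confirmed by this diff:

```python
# Hypothetical import migration implied by the renames above; verify against
# supervisely/nn/benchmark/__init__.py in 6.73.239 before relying on it.

# 6.73.237 (old layout):
# from supervisely.nn.benchmark.object_detection_benchmark import ObjectDetectionBenchmark
# from supervisely.nn.benchmark.evaluation.object_detection_evaluator import ObjectDetectionEvaluator

# 6.73.239 (new layout):
# from supervisely.nn.benchmark.object_detection.benchmark import ObjectDetectionBenchmark
# from supervisely.nn.benchmark.object_detection.evaluator import ObjectDetectionEvaluator
```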
supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py
RENAMED
@@ -1,46 +1,42 @@
 from __future__ import annotations
 
-from typing import TYPE_CHECKING
-
 import numpy as np
 import pandas as pd
 
-from supervisely.nn.benchmark.
-from supervisely.nn.benchmark.visualization.
-
-if TYPE_CHECKING:
-    from supervisely.nn.benchmark.visualization.visualizer import Visualizer
+from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
+from supervisely.nn.benchmark.visualization.widgets import ChartWidget, MarkdownWidget
 
 
-class F1ScoreAtDifferentIOU(
+class F1ScoreAtDifferentIOU(DetectionVisMetric):
+    MARKDOWN = "f1_score_at_iou"
+    CHART = "f1_score_at_iou"
 
-        self.
-            is_header=True,
-            formats=[self._loader.vis_texts.definitions.iou_threshold],
-        ),
-        chart=Widget.Chart(),
+    @property
+    def md(self) -> MarkdownWidget:
+        return MarkdownWidget(
+            self.MARKDOWN,
+            "Confidence Profile at Different IoU thresholds",
+            self.vis_texts.markdown_f1_at_ious,
         )
 
+    @property
+    def chart(self) -> ChartWidget:
+        return ChartWidget(self.CHART, self._get_figure())
+
+    def _get_figure(self):  # -> go.Figure:
         import plotly.express as px  # pylint: disable=import-error
 
-        f1s = self._loader.mp.m_full.score_profile_f1s
+        f1s = self.eval_result.mp.m_full.score_profile_f1s
 
         # downsample
-        if len(self.
+        if len(self.eval_result.df_score_profile) > 5000:
             f1s_down = f1s[:, :: f1s.shape[1] // 1000]
         else:
             f1s_down = f1s
 
-        iou_names = list(map(lambda x: str(round(x, 2)), self.
+        iou_names = list(map(lambda x: str(round(x, 2)), self.eval_result.mp.iouThrs.tolist()))
         df = pd.DataFrame(
-            np.concatenate([self.
+            np.concatenate([self.eval_result.dfsp_down["scores"].values[:, None], f1s_down.T], 1),
             columns=["scores"] + iou_names,
         )
         labels = {"value": "Value", "variable": "IoU threshold", "scores": "Confidence Score"}
@@ -49,7 +45,6 @@ class F1ScoreAtDifferentIOU(MetricVis):
             df,
             x="scores",
             y=iou_names,
-            # title="F1-Score at different IoU Thresholds",
             labels=labels,
             color_discrete_sequence=px.colors.sequential.Viridis,
             width=None,
@@ -64,7 +59,7 @@ class F1ScoreAtDifferentIOU(MetricVis):
         for i, iou in enumerate(iou_names):
             argmax_f1 = f1s[i].argmax()
             max_f1 = f1s[i][argmax_f1]
-            score = self.
+            score = self.eval_result.mp.m_full.score_profile["scores"][argmax_f1]
             fig.add_annotation(
                 x=score,
                 y=max_f1,
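For readers skimming the new `_get_figure()`: the slice-based downsampling is easy to misread. A standalone illustration with synthetic data (not the supervisely API):

```python
import numpy as np

# Synthetic stand-in for eval_result.mp.m_full.score_profile_f1s:
# one row per IoU threshold, one column per confidence score.
f1s = np.random.rand(10, 25_000)

# Keep every (ncols // 1000)-th column, leaving roughly 1000 points per
# curve, which is what the `f1s[:, :: f1s.shape[1] // 1000]` slice above does.
f1s_down = f1s[:, :: f1s.shape[1] // 1000]
print(f1s_down.shape)  # (10, 1000)
```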
supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py
ADDED
@@ -0,0 +1,137 @@
+from __future__ import annotations
+
+from typing import Dict, Literal
+
+from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
+from supervisely.nn.benchmark.visualization.widgets import (
+    ChartWidget,
+    ContainerWidget,
+    MarkdownWidget,
+    RadioGroupWidget,
+)
+
+
+class FrequentlyConfused(DetectionVisMetric):
+    MARKDOWN = "frequently_confused"
+    MARKDOWN_EMPTY = "frequently_confused_empty"
+    CHART = "frequently_confused"
+    RADIO_GROUP = "frequently_confused_radio_group"
+
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, **kwargs)
+        self.clickable = True
+        self.df = self.eval_result.mp.frequently_confused()
+        self._keypair_sep: str = "-"
+        self.is_empty = self.df.empty
+        self.switchable = True
+
+    @property
+    def md(self) -> MarkdownWidget:
+        text = self.vis_texts.markdown_frequently_confused
+        pair = self.df["category_pair"][0]
+        prob = self.df["probability"][0]
+        text = text.format(
+            pair[0],
+            pair[1],
+            prob.round(2),
+            pair[0],
+            pair[1],
+            (prob * 100).round(),
+            pair[0],
+            pair[1],
+            pair[1],
+            pair[0],
+        )
+        return MarkdownWidget(self.MARKDOWN, "Frequently Confused Classes", text)
+
+    @property
+    def chart(self) -> ContainerWidget:
+        return ContainerWidget(
+            [self.radio_group(), self._get_chart("probability"), self._get_chart("count")],
+            self.CHART,
+        )
+
+    def radio_group(self) -> RadioGroupWidget:
+        return RadioGroupWidget(
+            "Probability or Count",
+            self.RADIO_GROUP,
+            ["probability", "count"],
+        )
+
+    def _get_chart(self, switch_key: Literal["probability", "count"]) -> ChartWidget:
+        chart = ChartWidget(
+            self.CHART,
+            self._get_figure(switch_key),
+            switch_key=switch_key,
+            switchable=self.switchable,
+            radiogroup_id=self.RADIO_GROUP,
+        )
+        chart.set_click_data(
+            self.explore_modal_table.id,
+            self.get_click_data(),
+            chart_click_extra="'getKey': (payload) => `${payload.points[0].x}`, 'keySeparator': '-',",
+        )
+        return chart
+
+    @property
+    def empty_md(self) -> MarkdownWidget:
+        text = self.vis_texts.markdown_frequently_confused_empty
+        return MarkdownWidget(self.MARKDOWN_EMPTY, "Frequently Confused Classes", text)
+
+    def _get_figure(self, switch_key: Literal["probability", "count"]):  # -> go.Figure:
+        if self.is_empty:
+            return
+
+        import plotly.graph_objects as go  # pylint: disable=import-error
+
+        # Frequency of confusion as bar chart
+        confused_df = self.eval_result.mp.frequently_confused()
+        confused_name_pairs = confused_df["category_pair"]
+        x_labels = [f"{pair[0]} - {pair[1]}" for pair in confused_name_pairs]
+        y_labels = confused_df[switch_key]
+
+        fig = go.Figure()
+        fig.add_trace(
+            go.Bar(x=x_labels, y=y_labels, marker=dict(color=y_labels, colorscale="Reds"))
+        )
+        fig.update_layout(
+            # title="Frequently confused class pairs",
+            xaxis_title="Class Pair",
+            yaxis_title=y_labels.name.capitalize(),
+            width=1000 if len(x_labels) > 10 else 600,
+        )
+        fig.update_traces(text=y_labels.round(2))
+        fig.update_traces(
+            hovertemplate="Class Pair: %{x}<br>"
+            + y_labels.name.capitalize()
+            + ": %{y:.2f}<extra></extra>"
+        )
+        return fig
+
+    def get_click_data(self) -> Dict:
+        if not self.clickable or self.is_empty:
+            return
+        res = dict(projectMeta=self.eval_result.pred_project_meta.to_json())
+
+        res["layoutTemplate"] = [None, None, None]
+        res["clickData"] = {}
+
+        for keypair, v in self.eval_result.click_data.frequently_confused.items():
+            subkey1, subkey2 = keypair
+            key = f"{subkey1} {self._keypair_sep} {subkey2}"
+            res["clickData"][key] = {}
+            res["clickData"][key]["imagesIds"] = []
+            res["clickData"][key]["title"] = f"Confused classes: {subkey1} - {subkey2}"
+
+            img_ids = set()
+            obj_ids = set()
+            for x in v:
+                img_ids.add(x["dt_img_id"])
+                obj_ids.add(x["dt_obj_id"])
+
+            res["clickData"][key]["imagesIds"] = list(img_ids)
+            res["clickData"][key]["filters"] = [
+                {"type": "specific_objects", "tagId": None, "value": list(obj_ids)},
+            ]
+
+        return res
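For reference, the payload that `get_click_data()` feeds into `chart.set_click_data()` has the following shape; the keys come straight from the code above, while the ids and class names here are invented for illustration:

```python
# Illustrative payload only; ids and class names are made up.
click_data = {
    "projectMeta": {},  # eval_result.pred_project_meta.to_json()
    "layoutTemplate": [None, None, None],
    "clickData": {
        "cat - dog": {  # f"{subkey1} {self._keypair_sep} {subkey2}"
            "imagesIds": [101, 102],
            "title": "Confused classes: cat - dog",
            "filters": [
                {"type": "specific_objects", "tagId": None, "value": [9001, 9002]},
            ],
        },
    },
}
```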
supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py
ADDED
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
+from supervisely.nn.benchmark.visualization.widgets import (
+    ChartWidget,
+    CollapseWidget,
+    MarkdownWidget,
+    NotificationWidget,
+)
+
+
+class IOUDistribution(DetectionVisMetric):
+    MARKDOWN_LOCALIZATION_ACCURACY = "localization_accuracy"
+    MARKDOWN_IOU_DISTRIBUTION = "iou_distribution"
+    NOTIFICATION = "iou_distribution"
+    COLLAPSE = "iou_distribution"
+    CHART = "iou_distribution"
+
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, **kwargs)
+        self.md_title = "Localization Accuracy (IoU)"
+
+    @property
+    def md(self) -> MarkdownWidget:
+        text = self.vis_texts.markdown_localization_accuracy
+        text = text.format(self.vis_texts.definitions.iou_score)
+        return MarkdownWidget(self.MARKDOWN_LOCALIZATION_ACCURACY, self.md_title, text)
+
+    @property
+    def md_iou_distribution(self) -> MarkdownWidget:
+        text = self.vis_texts.markdown_iou_distribution
+        text = text.format(self.vis_texts.definitions.iou_score)
+        return MarkdownWidget(self.MARKDOWN_IOU_DISTRIBUTION, self.md_title, text)
+
+    @property
+    def notification(self) -> NotificationWidget:
+        title, _ = self.vis_texts.notification_avg_iou.values()
+        return NotificationWidget(
+            self.NOTIFICATION,
+            title.format(self.eval_result.mp.base_metrics()["iou"].round(2)),
+        )
+
+    @property
+    def chart(self) -> ChartWidget:
+        return ChartWidget(self.CHART, self._get_figure())
+
+    @property
+    def collapse(self) -> CollapseWidget:
+        md1 = MarkdownWidget(
+            "iou_calculation",
+            "How IoU is calculated?",
+            self.vis_texts.markdown_iou_calculation,
+        )
+        md2 = MarkdownWidget(
+            "what_is_pr_curve",
+            "How the PR curve is built?",
+            self.vis_texts.markdown_what_is_pr_curve.format(
+                self.vis_texts.definitions.confidence_score,
+                self.vis_texts.definitions.true_positives,
+                self.vis_texts.definitions.false_positives,
+            ),
+        )
+        return CollapseWidget([md1, md2])
+
+    def _get_figure(self):  # -> go.Figure:
+        import plotly.graph_objects as go  # pylint: disable=import-error
+
+        fig = go.Figure()
+        nbins = 40
+        fig.add_trace(go.Histogram(x=self.eval_result.mp.ious, nbinsx=nbins))
+        fig.update_layout(
+            xaxis_title="IoU",
+            yaxis_title="Count",
+            width=600,
+            height=500,
+        )
+
+        # Add annotation for mean IoU as vertical line
+        mean_iou = self.eval_result.mp.ious.mean()
+        y1 = len(self.eval_result.mp.ious) // nbins
+        fig.add_shape(
+            type="line",
+            x0=mean_iou,
+            x1=mean_iou,
+            y0=0,
+            y1=y1,
+            line=dict(color="orange", width=2, dash="dash"),
+        )
+        fig.update_traces(hovertemplate="IoU: %{x:.2f}<br>Count: %{y}<extra></extra>")
+        fig.add_annotation(x=mean_iou, y=y1, text=f"Mean IoU: {mean_iou:.2f}", showarrow=False)
+        fig.update_layout(
+            dragmode=False,
+            modebar=dict(
+                remove=[
+                    "zoom2d",
+                    "pan2d",
+                    "select2d",
+                    "lasso2d",
+                    "zoomIn2d",
+                    "zoomOut2d",
+                    "autoScale2d",
+                    "resetScale2d",
+                ]
+            ),
+        )
+        return fig
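Note that the dashed mean-IoU line's height is only an estimate: `y1 = len(ious) // nbins` assumes counts are spread evenly across the 40 bins. A minimal standalone reproduction with synthetic IoUs in place of `eval_result.mp.ious`:

```python
import numpy as np
import plotly.graph_objects as go

ious = np.clip(np.random.normal(0.8, 0.1, 1_000), 0.0, 1.0)  # fake IoUs
nbins = 40

fig = go.Figure(go.Histogram(x=ious, nbinsx=nbins))
mean_iou = ious.mean()
y1 = len(ious) // nbins  # crude bar-height estimate, as in the widget code
fig.add_shape(
    type="line", x0=mean_iou, x1=mean_iou, y0=0, y1=y1,
    line=dict(color="orange", width=2, dash="dash"),
)
fig.add_annotation(x=mean_iou, y=y1, text=f"Mean IoU: {mean_iou:.2f}", showarrow=False)
fig.show()
```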
supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py
ADDED
@@ -0,0 +1,136 @@
+from __future__ import annotations
+
+from typing import Dict
+
+from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
+from supervisely.nn.benchmark.visualization.widgets import (
+    ChartWidget,
+    MarkdownWidget,
+    TableWidget,
+)
+
+
+class KeyMetrics(DetectionVisMetric):
+    MARKDOWN = "key_metrics"
+    CHART = "key_metrics"
+    TABLE = "key_metrics"
+
+    @property
+    def md(self) -> MarkdownWidget:
+        text = self.vis_texts.markdown_key_metrics.format(
+            self.vis_texts.definitions.iou_threshold,
+            self.vis_texts.definitions.average_precision,
+            self.vis_texts.definitions.confidence_score,
+        )
+        return MarkdownWidget(self.MARKDOWN, "Key Metrics", text)
+
+    @property
+    def table(self) -> TableWidget:
+        columns = ["metrics", "values"]
+        content = []
+        for metric, value in self.eval_result.mp.metric_table().items():
+            row = [metric, round(value, 2)]
+            dct = {
+                "row": row,
+                "id": metric,
+                "items": row,
+            }
+            content.append(dct)
+
+        columns_options = [
+            {"disableSort": True},  # , "ustomCell": True},
+            {"disableSort": True},
+        ]
+
+        data = {
+            "columns": columns,
+            "columnsOptions": columns_options,
+            "content": content,
+        }
+        table = TableWidget(
+            name=self.TABLE,
+            data=data,
+            fix_columns=1,
+            width="60%",
+            show_header_controls=False,
+            main_column=columns[0],
+        )
+        return table
+
+    @property
+    def chart(self) -> ChartWidget:
+        return ChartWidget(self.CHART, self._get_figure())
+
+    def _get_figure(self):  # -> go.Figure:
+        import plotly.graph_objects as go  # pylint: disable=import-error
+
+        # Overall Metrics
+        base_metrics = self.eval_result.mp.base_metrics()
+        r = list(base_metrics.values())
+        theta = [self.eval_result.mp.metric_names[k] for k in base_metrics.keys()]
+        fig = go.Figure()
+        fig.add_trace(
+            go.Scatterpolar(
+                r=r + [r[0]],
+                theta=theta + [theta[0]],
+                # fill="toself",
+                name="Overall Metrics",
+                hovertemplate="%{theta}: %{r:.2f}<extra></extra>",
+            )
+        )
+        fig.update_layout(
+            polar=dict(
+                radialaxis=dict(
+                    range=[0.0, 1.0],
+                    ticks="outside",
+                ),
+                angularaxis=dict(rotation=90, direction="clockwise"),
+            ),
+            dragmode=False,
+            margin=dict(l=25, r=25, t=25, b=25),
+        )
+        fig.update_layout(
+            modebar=dict(
+                remove=[
+                    "zoom2d",
+                    "pan2d",
+                    "select2d",
+                    "lasso2d",
+                    "zoomIn2d",
+                    "zoomOut2d",
+                    "autoScale2d",
+                    "resetScale2d",
+                ]
+            )
+        )
+        return fig
+
+    def get_click_data(self) -> Dict:
+        if not self.clickable:
+            return
+        res = {}
+
+        res["layoutTemplate"] = [None, None, None]
+        res["clickData"] = {}
+        for outcome, matches_data in self.eval_result.click_data.outcome_counts.items():
+            res["clickData"][outcome] = {}
+            res["clickData"][outcome]["imagesIds"] = []
+
+            img_ids = set()
+            for match_data in matches_data:
+                pairs_data = self.eval_result.matched_pair_data[match_data["gt_img_id"]]
+                if outcome == "FN":
+                    img_ids.add(pairs_data.diff_image_info.id)
+                else:
+                    img_ids.add(pairs_data.pred_image_info.id)
+
+            res["clickData"][outcome][
+                "title"
+            ] = f"{outcome}: {len(matches_data)} object{'s' if len(matches_data) > 1 else ''}"
+            res["clickData"][outcome]["imagesIds"] = list(img_ids)
+            res["clickData"][outcome]["filters"] = [
+                {"type": "tag", "tagId": "confidence", "value": [0, 1]},
+                {"type": "tag", "tagId": "outcome", "value": outcome},
+            ]
+
+        return res
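One detail in `KeyMetrics._get_figure()` worth calling out: `go.Scatterpolar` does not close the polygon on its own, so the first point is appended to both `r` and `theta`. A standalone sketch (metric values and names invented; the widget reads them from `eval_result.mp.base_metrics()` and `eval_result.mp.metric_names`):

```python
import plotly.graph_objects as go

r = [0.62, 0.71, 0.55, 0.80]                        # made-up metric values
theta = ["mAP", "Precision", "Recall", "Avg. IoU"]  # made-up metric names

fig = go.Figure(
    go.Scatterpolar(
        r=r + [r[0]],              # repeat the first point...
        theta=theta + [theta[0]],  # ...so the radar polygon closes
        hovertemplate="%{theta}: %{r:.2f}<extra></extra>",
    )
)
fig.update_layout(polar=dict(radialaxis=dict(range=[0.0, 1.0])))
fig.show()
```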
supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py
RENAMED
@@ -1,42 +1,30 @@
 from __future__ import annotations
 
-from typing import 
+from typing import Dict
 
-from supervisely.api.image_api import ImageInfo
-from supervisely.nn.benchmark.
-from supervisely.nn.benchmark.visualization.
+from supervisely.api.image_api import ImageApi, ImageInfo
+from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
+from supervisely.nn.benchmark.visualization.widgets import MarkdownWidget, TableWidget
 
-if TYPE_CHECKING:
-    from supervisely.nn.benchmark.visualization.visualizer import Visualizer
 
+class ModelPredictions(DetectionVisMetric):
+    MARKDOWN = "model_predictions"
+    TABLE = "model_predictions"
 
-    def __init__(self, loader: Visualizer) -> None:
-        super().__init__(loader)
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, **kwargs)
         self.clickable = True
-        self.
-            self._loader.vis_texts,
-            markdown_predictions_gallery=Widget.Markdown(
-                title="Model Predictions", is_header=False
-            ),
-            markdown_predictions_table=Widget.Markdown(
-                title="Prediction details for every image", is_header=True
-            ),
-            # gallery=Widget.Gallery(is_table_gallery=True),
-            table=Widget.Table(),
-        )
-        self._row_ids = None
+        self._row_ids = None  # TODO: check if this is used
 
+    @property
+    def md(self) -> MarkdownWidget:
+        text = self.vis_texts.markdown_predictions_table
+        return MarkdownWidget(self.MARKDOWN, "Prediction details for every image", text)
 
-        df = self._loader.mp.prediction_table().round(2)
+    @property
+    def table(self) -> TableWidget:
+        tmp = set([d.pred_image_info.name for d in self.eval_result.matched_pair_data.values()])
+        df = self.eval_result.mp.prediction_table().round(2)
         df = df[df["Image name"].isin(tmp)]
         columns_options = [
             {"maxWidth": "225px"},
@@ -58,22 +46,20 @@ class ModelPredictions(MetricVis):
             {"maxValue": 1, "tooltip": "Recall (sensitivity)"},
             {"maxValue": 1, "tooltip": "F1 score (harmonic mean of precision and recall)"},
         ]
-        table_model_preds = widget.table(df, columns_options=columns_options)
-        tbl = table_model_preds.to_json()
 
-        res["content"] = []
+        columns = df.columns.tolist()[1:]  # exclude sly_id
+        content = []
 
         key_mapping = {}
-        for old, new in zip(ImageInfo._fields, 
+        for old, new in zip(ImageInfo._fields, ImageApi.info_sequence()):
             key_mapping[old] = new
 
         self._row_ids = []
+        df = df.replace({float("nan"): None})  # replace NaN / float("nan") with None
 
-        for row in 
+        for row in df.values.tolist():
             sly_id = row.pop(0)
-            info = self.
+            info = self.eval_result.matched_pair_data[sly_id].gt_image_info
 
             dct = {
                 "row": {key_mapping[k]: v for k, v in info._asdict().items()},
@@ -82,11 +68,25 @@ class ModelPredictions(MetricVis):
             }
 
             self._row_ids.append(dct["id"])
+            content.append(dct)
+
+        data = {
+            "columns": columns,
+            "columnsOptions": columns_options,
+            "content": content,
+        }
+        table = TableWidget(
+            name=self.TABLE,
+            data=data,
+            fix_columns=1,
+        )
+        table.set_click_data(
+            self.explore_modal_table.id,
+            self.get_click_data(),
+        )
+        return table
 
-    def 
+    def get_click_data(self) -> Dict:
         res = {}
         res["layoutTemplate"] = [
             {"skipObjectTagsFiltering": True, "columnTitle": "Ground Truth"},
@@ -96,21 +96,25 @@ class ModelPredictions(MetricVis):
         click_data = res.setdefault("clickData", {})
 
         default_filters = [
-            {
+            {
+                "type": "tag",
+                "tagId": "confidence",
+                "value": [self.eval_result.mp.f1_optimal_conf, 1],
+            },
             # {"type": "tag", "tagId": "outcome", "value": "FP"},
         ]
 
-        for 
-            gt = 
-            pred = 
-            diff = 
+        for pairs_data in self.eval_result.matched_pair_data.values():
+            gt = pairs_data.gt_image_info
+            pred = pairs_data.pred_image_info
+            diff = pairs_data.diff_image_info
             assert gt.name == pred.name == diff.name
             key = click_data.setdefault(str(pred.name), {})
             key["imagesIds"] = [gt.id, pred.id, diff.id]
             key["filters"] = default_filters
             key["title"] = f"Image: {pred.name}"
             image_id = pred.id
-            ann_json = 
+            ann_json = pairs_data.pred_annotation.to_json()
             assert image_id == pred.id
             object_bindings = []
             for obj in ann_json["objects"]:
@@ -130,7 +134,7 @@ class ModelPredictions(MetricVis):
             )
 
             image_id = diff.id
-            ann_json = 
+            ann_json = pairs_data.diff_annotation.to_json()
             assert image_id == diff.id
             for obj in ann_json["objects"]:
                 for tag in obj["tags"]: