supervisely-6.73.237-py3-none-any.whl → supervisely-6.73.239-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/annotation/annotation.py +2 -2
- supervisely/api/entity_annotation/tag_api.py +11 -4
- supervisely/geometry/rectangle.py +7 -8
- supervisely/nn/__init__.py +1 -0
- supervisely/nn/benchmark/__init__.py +14 -2
- supervisely/nn/benchmark/base_benchmark.py +84 -37
- supervisely/nn/benchmark/base_evaluator.py +120 -0
- supervisely/nn/benchmark/base_visualizer.py +265 -0
- supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -5
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +2 -2
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +39 -16
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +4 -4
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +12 -11
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +6 -6
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +3 -3
- supervisely/nn/benchmark/{instance_segmentation_benchmark.py → instance_segmentation/benchmark.py} +9 -3
- supervisely/nn/benchmark/instance_segmentation/evaluator.py +58 -0
- supervisely/nn/benchmark/{visualization/text_templates/instance_segmentation_text.py → instance_segmentation/text_templates.py} +53 -69
- supervisely/nn/benchmark/instance_segmentation/visualizer.py +18 -0
- supervisely/nn/benchmark/object_detection/__init__.py +0 -0
- supervisely/nn/benchmark/object_detection/base_vis_metric.py +51 -0
- supervisely/nn/benchmark/{object_detection_benchmark.py → object_detection/benchmark.py} +4 -2
- supervisely/nn/benchmark/object_detection/evaluation_params.yaml +2 -0
- supervisely/nn/benchmark/{evaluation/object_detection_evaluator.py → object_detection/evaluator.py} +67 -9
- supervisely/nn/benchmark/{evaluation/coco → object_detection}/metric_provider.py +13 -14
- supervisely/nn/benchmark/{visualization/text_templates/object_detection_text.py → object_detection/text_templates.py} +49 -41
- supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py +48 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confidence_distribution.py +20 -24
- supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +119 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confusion_matrix.py +34 -22
- supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +129 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py +21 -26
- supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py +137 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py +106 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +136 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py +53 -49
- supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +188 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +191 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +116 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +106 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py +49 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +72 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +59 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +71 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +56 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py +110 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py +151 -0
- supervisely/nn/benchmark/object_detection/visualizer.py +697 -0
- supervisely/nn/benchmark/semantic_segmentation/__init__.py +9 -0
- supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py +55 -0
- supervisely/nn/benchmark/semantic_segmentation/benchmark.py +32 -0
- supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml +0 -0
- supervisely/nn/benchmark/semantic_segmentation/evaluator.py +162 -0
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +153 -0
- supervisely/nn/benchmark/semantic_segmentation/text_templates.py +130 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/__init__.py +0 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/acknowledgement.py +15 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py +57 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +92 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py +84 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +101 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py +45 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +60 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py +107 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +112 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py +48 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py +178 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py +21 -0
- supervisely/nn/benchmark/semantic_segmentation/visualizer.py +304 -0
- supervisely/nn/benchmark/utils/__init__.py +12 -0
- supervisely/nn/benchmark/utils/detection/__init__.py +2 -0
- supervisely/nn/benchmark/{evaluation/coco → utils/detection}/calculate_metrics.py +6 -4
- supervisely/nn/benchmark/utils/detection/metric_provider.py +533 -0
- supervisely/nn/benchmark/{coco_utils → utils/detection}/sly2coco.py +4 -4
- supervisely/nn/benchmark/{coco_utils/utils.py → utils/detection/utlis.py} +11 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/__init__.py +0 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/calculate_metrics.py +35 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +804 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/loader.py +65 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/utils.py +109 -0
- supervisely/nn/benchmark/visualization/evaluation_result.py +17 -3
- supervisely/nn/benchmark/visualization/vis_click_data.py +1 -1
- supervisely/nn/benchmark/visualization/widgets/__init__.py +3 -0
- supervisely/nn/benchmark/visualization/widgets/chart/chart.py +12 -4
- supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +35 -8
- supervisely/nn/benchmark/visualization/widgets/gallery/template.html +8 -4
- supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +1 -1
- supervisely/nn/benchmark/visualization/widgets/notification/notification.py +11 -7
- supervisely/nn/benchmark/visualization/widgets/radio_group/__init__.py +0 -0
- supervisely/nn/benchmark/visualization/widgets/radio_group/radio_group.py +34 -0
- supervisely/nn/benchmark/visualization/widgets/table/table.py +9 -3
- supervisely/nn/benchmark/visualization/widgets/widget.py +4 -0
- supervisely/project/project.py +18 -6
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/METADATA +3 -1
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/RECORD +104 -82
- supervisely/nn/benchmark/coco_utils/__init__.py +0 -2
- supervisely/nn/benchmark/evaluation/__init__.py +0 -3
- supervisely/nn/benchmark/evaluation/base_evaluator.py +0 -64
- supervisely/nn/benchmark/evaluation/coco/__init__.py +0 -2
- supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +0 -88
- supervisely/nn/benchmark/utils.py +0 -13
- supervisely/nn/benchmark/visualization/inference_speed/__init__.py +0 -19
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py +0 -161
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py +0 -28
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +0 -141
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py +0 -63
- supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py +0 -23
- supervisely/nn/benchmark/visualization/vis_metric_base.py +0 -337
- supervisely/nn/benchmark/visualization/vis_metrics/__init__.py +0 -67
- supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py +0 -55
- supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py +0 -93
- supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +0 -144
- supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py +0 -115
- supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py +0 -86
- supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py +0 -119
- supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py +0 -148
- supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py +0 -109
- supervisely/nn/benchmark/visualization/vis_metrics/overview.py +0 -189
- supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py +0 -57
- supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py +0 -101
- supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py +0 -46
- supervisely/nn/benchmark/visualization/vis_metrics/precision.py +0 -56
- supervisely/nn/benchmark/visualization/vis_metrics/recall.py +0 -54
- supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py +0 -57
- supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py +0 -88
- supervisely/nn/benchmark/visualization/vis_metrics/what_is.py +0 -23
- supervisely/nn/benchmark/visualization/vis_templates.py +0 -241
- supervisely/nn/benchmark/visualization/vis_widgets.py +0 -128
- supervisely/nn/benchmark/visualization/visualizer.py +0 -729
- /supervisely/nn/benchmark/{visualization/text_templates → instance_segmentation}/__init__.py +0 -0
- /supervisely/nn/benchmark/{evaluation/coco → instance_segmentation}/evaluation_params.yaml +0 -0
- /supervisely/nn/benchmark/{evaluation/coco → utils/detection}/metrics.py +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/LICENSE +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/WHEEL +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/top_level.txt +0 -0
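The bulk of this release reorganizes `supervisely/nn/benchmark` into task-specific subpackages (`object_detection`, `instance_segmentation`, `semantic_segmentation`) plus a shared `utils` package, as the renames above show. A minimal sketch of the resulting import-path migration, with module paths taken from the rename list (the imported class name is an assumption for illustration, not confirmed by this diff):

    try:
        # new layout in 6.73.239: one package per CV task
        from supervisely.nn.benchmark.object_detection.benchmark import (
            ObjectDetectionBenchmark,  # hypothetical class name
        )
    except ImportError:
        # old layout in 6.73.237: flat modules under nn/benchmark
        from supervisely.nn.benchmark.object_detection_benchmark import (
            ObjectDetectionBenchmark,
        )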
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py (new file)
@@ -0,0 +1,178 @@
+from typing import List, Union
+
+from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
+    SemanticSegmVisMetric,
+)
+from supervisely.nn.benchmark.visualization.widgets import (
+    ChartWidget,
+    MarkdownWidget,
+    TableWidget,
+)
+
+
+class Speedtest(SemanticSegmVisMetric):
+
+    def is_empty(self) -> bool:
+        return self.eval_result.speedtest_info is None
+
+    def multiple_batche_sizes(self) -> bool:
+        return len(self.eval_result.speedtest_info["speedtest"]) > 1
+
+    @property
+    def latency(self) -> List[Union[int, str]]:
+        if self.eval_result.speedtest_info is None:
+            return ["N/A"]
+        latency = []
+        for test in self.eval_result.speedtest_info["speedtest"]:
+            latency.append(round(test["benchmark"]["total"], 2))
+        return latency
+
+    @property
+    def fps(self) -> List[Union[int, str]]:
+        if self.eval_result.speedtest_info is None:
+            return ["N/A"]
+        fps = []
+        for test in self.eval_result.speedtest_info["speedtest"]:
+            fps.append(round(1000 / test["benchmark"]["total"], 2))
+        return fps
+
+    @property
+    def intro_md(self) -> MarkdownWidget:
+        device = self.eval_result.speedtest_info["model_info"]["device"]
+        hardware = self.eval_result.speedtest_info["model_info"]["hardware"]
+        runtime = self.eval_result.speedtest_info["model_info"]["runtime"]
+        num_it = self.eval_result.speedtest_info["speedtest"][0]["num_iterations"]
+
+        return MarkdownWidget(
+            name="speedtest_intro",
+            title="Inference Speed",
+            text=self.vis_texts.markdown_speedtest_intro.format(device, hardware, runtime, num_it),
+        )
+
+    @property
+    def intro_table(self) -> TableWidget:
+        res = {}
+
+        columns = [" ", "Inference time", "FPS"]
+        temp_res = {}
+        max_fps = 0
+        for test in self.eval_result.speedtest_info["speedtest"]:
+            batch_size = test["batch_size"]
+
+            ms = round(test["benchmark"]["total"], 2)
+            fps = round(1000 / test["benchmark"]["total"] * batch_size)
+            row = [batch_size, ms, fps]
+            temp_res[batch_size] = row
+            max_fps = max(max_fps, fps)
+
+        res["content"] = []
+        # sort by batch size
+        temp_res = dict(sorted(temp_res.items()))
+        for row in temp_res.values():
+            dct = {
+                "row": row,
+                "id": row[0],
+                "items": row,
+            }
+            res["content"].append(dct)
+
+        columns_options = [
+            {"disableSort": True},  # "customCell": True
+            {"subtitle": "ms", "tooltip": "Milliseconds for batch images", "postfix": "ms"},
+            {
+                "subtitle": "imgs/sec",
+                "tooltip": "Frames (images) per second",
+                "postfix": "fps",
+                "maxValue": max_fps,
+            },
+        ]
+
+        res["columns"] = columns
+        res["columnsOptions"] = columns_options
+
+        table = TableWidget(
+            name="speedtest_intro_table",
+            data=res,
+            show_header_controls=False,
+            fix_columns=1,
+        )
+        # table.main_column = "Batch size"
+        table.fixed_columns = 1
+        table.show_header_controls = False
+        return table
+
+    @property
+    def batch_size_md(self) -> MarkdownWidget:
+        return MarkdownWidget(
+            name="batch_size",
+            title="Batch Size",
+            text=self.vis_texts.markdown_batch_inference,
+        )
+
+    @property
+    def chart(self) -> ChartWidget:
+        return ChartWidget(name="speed_charts", figure=self.get_figure())
+
+    def get_figure(self):  # -> Optional[go.Figure]
+        import plotly.graph_objects as go  # pylint: disable=import-error
+        from plotly.subplots import make_subplots  # pylint: disable=import-error
+
+        fig = make_subplots(cols=2)
+
+        ms_color = "#e377c2"
+        fps_color = "#17becf"
+
+        temp_res = {}
+        for test in self.eval_result.speedtest_info["speedtest"]:
+            batch_size = test["batch_size"]
+
+            std = test["benchmark_std"]["total"]
+            ms = test["benchmark"]["total"]
+            fps = round(1000 / test["benchmark"]["total"] * batch_size)
+
+            ms_line = temp_res.setdefault("ms", {})
+            fps_line = temp_res.setdefault("fps", {})
+            ms_std_line = temp_res.setdefault("ms_std", {})
+
+            ms_line[batch_size] = ms
+            fps_line[batch_size] = fps
+            ms_std_line[batch_size] = round(std, 2)
+
+        fig.add_trace(
+            go.Scatter(
+                x=list(temp_res["ms"].keys()),
+                y=list(temp_res["ms"].values()),
+                name="Inference time (ms)",
+                line=dict(color=ms_color),
+                customdata=list(temp_res["ms_std"].values()),
+                error_y=dict(
+                    type="data",
+                    array=list(temp_res["ms_std"].values()),
+                    visible=True,
+                    color="rgba(227, 119, 194, 0.7)",
+                ),
+                hovertemplate="Batch Size: %{x}<br>Time: %{y:.2f} ms<br> Standard deviation: %{customdata:.2f} ms<extra></extra>",
+            ),
+            col=1,
+            row=1,
+        )
+        fig.add_trace(
+            go.Scatter(
+                x=list(temp_res["fps"].keys()),
+                y=list(temp_res["fps"].values()),
+                name="FPS",
+                line=dict(color=fps_color),
+                hovertemplate="Batch Size: %{x}<br>FPS: %{y:.2f}<extra></extra>",
+            ),
+            col=2,
+            row=1,
+        )
+
+        fig.update_xaxes(title_text="Batch size", col=1, dtick=1)
+        fig.update_xaxes(title_text="Batch size", col=2, dtick=1)
+
+        fig.update_yaxes(title_text="Time (ms)", col=1)
+        fig.update_yaxes(title_text="FPS", col=2)
+        fig.update_layout(height=400)
+
+        return fig
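For reference, the throughput numbers in `intro_table` and `get_figure` above are derived from per-batch latency as `1000 / total_ms * batch_size`. A standalone sketch of that arithmetic (the sample values are made up):

    def fps_from_latency(total_ms: float, batch_size: int) -> int:
        # total_ms: average wall-clock milliseconds to run inference on one batch.
        # 1000 / total_ms gives batches per second; times batch_size gives images per second.
        return round(1000 / total_ms * batch_size)

    assert fps_from_latency(50.0, 8) == 160  # a 50 ms batch of 8 images -> 160 imgs/sec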
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py (new file)
@@ -0,0 +1,21 @@
+markdown_header = """
+<h1>{}</h1>
+
+<div class="model-info-block">
+    <div>Created by <b>{}</b></div>
+    <div><i class="zmdi zmdi-calendar-alt"></i><span>{}</span></div>
+</div>
+"""
+
+markdown_overview = """
+- **Model**: {}
+- **Checkpoint**: {}
+- **Architecture**: {}
+- **Task type**: {}
+- **Runtime**: {}
+- **Checkpoint file**: <a href="{}" target="_blank">{}</a>
+- **Ground Truth project**: <a href="/projects/{}/datasets" target="_blank">{}</a>, {}{}
+{}
+
+Learn more about Model Benchmark, implementation details, and how to use the charts in our <a href="{}" target="_blank">Technical Report</a>.
+"""
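Both templates are plain `str.format` strings with positional placeholders. A hedged example of rendering `markdown_overview` (all thirteen values below are invented placeholders; the real call site lives in the `Overview` vis-metric, which this diff does not show):

    text = markdown_overview.format(
        "my-model",                       # model name
        "checkpoint.pt",                  # checkpoint
        "SegFormer",                      # architecture
        "semantic segmentation",          # task type
        "PyTorch",                        # runtime
        "https://example.com/ckpt.pt",    # checkpoint file URL
        "checkpoint.pt",                  # checkpoint file label
        123,                              # GT project id
        "My GT project",                  # GT project name
        "25 images",                      # dataset size text
        "",                               # optional suffix
        "",                               # optional extra line
        "https://docs.supervisely.com/",  # Technical Report URL
    )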
supervisely/nn/benchmark/semantic_segmentation/visualizer.py (new file)
@@ -0,0 +1,304 @@
+import random
+from collections import defaultdict
+from pathlib import Path
+
+import supervisely.nn.benchmark.semantic_segmentation.text_templates as vis_texts
+from supervisely.nn.benchmark.base_visualizer import BaseVisualizer
+from supervisely.nn.benchmark.cv_tasks import CVTask
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.acknowledgement import (
+    Acknowledgement,
+)
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.classwise_error_analysis import (
+    ClasswiseErrorAnalysis,
+)
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.confusion_matrix import (
+    ConfusionMatrix,
+)
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.explore_predictions import (
+    ExplorePredictions,
+)
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.frequently_confused import (
+    FrequentlyConfused,
+)
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.iou_eou import (
+    IntersectionErrorOverUnion,
+)
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.key_metrics import (
+    KeyMetrics,
+)
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.model_predictions import (
+    ModelPredictions,
+)
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.overview import Overview
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.renormalized_error_ou import (
+    RenormalizedErrorOverUnion,
+)
+from supervisely.nn.benchmark.semantic_segmentation.vis_metrics.speedtest import (
+    Speedtest,
+)
+from supervisely.nn.benchmark.visualization.widgets import (
+    ContainerWidget,
+    MarkdownWidget,
+    SidebarWidget,
+)
+from supervisely.project.project import Dataset, OpenMode, Project
+from supervisely.project.project_meta import ProjectMeta
+
+
+class SemanticSegmentationVisualizer(BaseVisualizer):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self.vis_texts = vis_texts
+        self._widgets_created = False
+        self.ann_opacity = 0.7
+
+        diff_project_info, diff_dataset_infos, existed = self._get_or_create_diff_project()
+        self.eval_result.diff_project_info = diff_project_info
+        self.eval_result.diff_dataset_infos = diff_dataset_infos
+        self.eval_result.matched_pair_data = {}
+
+        self.gt_project_path = str(Path(self.workdir).parent / "gt_project")
+        self.pred_project_path = str(Path(self.workdir).parent / "pred_project")
+
+        self.eval_result.images_map = {}
+        self.eval_result.images_by_class = defaultdict(set)
+        if not existed:
+            self._init_match_data()
+
+        # set filtered project meta
+        self.eval_result.filtered_project_meta = self._get_filtered_project_meta(self.eval_result)
+
+        self._get_sample_data_for_gallery()
+
+    @property
+    def cv_task(self):
+        return CVTask.SEMANTIC_SEGMENTATION
+
+    def _create_widgets(self):
+        # Modal Gellery
+        self.diff_modal = self._create_diff_modal_table()
+        self.explore_modal = self._create_explore_modal_table(
+            click_gallery_id=self.diff_modal.id, hover_text="Compare with GT"
+        )
+
+        # Notifcation
+        self.clickable_label = self._create_clickable_label()
+
+        # overview
+        overview = Overview(self.vis_texts, self.eval_result)
+        self.header = overview.get_header(self.api.user.get_my_info().login)
+        self.overview_md = overview.overview_md
+
+        # key metrics
+        key_metrics = KeyMetrics(self.vis_texts, self.eval_result)
+        self.key_metrics_md = key_metrics.md
+        self.key_metrics_chart = key_metrics.chart
+
+        # explore predictions
+        explore_predictions = ExplorePredictions(
+            self.vis_texts, self.eval_result, self.explore_modal, self.diff_modal
+        )
+        self.explore_predictions_md = explore_predictions.md
+        self.explore_predictions_gallery = explore_predictions.gallery(self.ann_opacity)
+
+        # model predictions
+        model_predictions = ModelPredictions(self.vis_texts, self.eval_result, self.diff_modal)
+        self.model_predictions_md = model_predictions.md
+        self.model_predictions_table = model_predictions.table
+
+        # intersection over union
+        iou_eou = IntersectionErrorOverUnion(self.vis_texts, self.eval_result)
+        self.iou_eou_md = iou_eou.md
+        self.iou_eou_chart = iou_eou.chart
+
+        # renormalized error over union
+        renorm_eou = RenormalizedErrorOverUnion(self.vis_texts, self.eval_result)
+        self.renorm_eou_md = renorm_eou.md
+        self.renorm_eou_chart = renorm_eou.chart
+
+        # classwise error analysis
+        classwise_error_analysis = ClasswiseErrorAnalysis(
+            self.vis_texts, self.eval_result, self.explore_modal
+        )
+        self.classwise_error_analysis_md = classwise_error_analysis.md
+        self.classwise_error_analysis_chart = classwise_error_analysis.chart
+
+        # confusion matrix
+        confusion_matrix = ConfusionMatrix(self.vis_texts, self.eval_result, self.explore_modal)
+        self.confusion_matrix_md = confusion_matrix.md
+        self.confusion_matrix_chart = confusion_matrix.chart
+
+        # frequently confused
+        frequently_confused = FrequentlyConfused(
+            self.vis_texts, self.eval_result, self.explore_modal
+        )
+        self.frequently_confused_md = frequently_confused.md
+        self.frequently_confused_chart = None
+        if not frequently_confused.is_empty:
+            self.frequently_confused_chart = frequently_confused.chart
+
+        # Acknowledgement
+        acknowledgement = Acknowledgement(self.vis_texts, self.eval_result)
+        self.acknowledgement_md = acknowledgement.md
+
+        # SpeedTest
+        self.speedtest_present = False
+        self.speedtest_multiple_batch_sizes = False
+        speedtest = Speedtest(self.vis_texts, self.eval_result)
+        if not speedtest.is_empty():
+            self.speedtest_present = True
+            self.speedtest_md_intro = speedtest.intro_md
+            self.speedtest_intro_table = speedtest.intro_table
+            if speedtest.multiple_batche_sizes():
+                self.speedtest_multiple_batch_sizes = True
+                self.speedtest_batch_inference_md = speedtest.batch_size_md
+                self.speedtest_chart = speedtest.chart
+
+        self._widgets_created = True
+
+    def _create_layout(self):
+        if not self._widgets_created:
+            self._create_widgets()
+
+        is_anchors_widgets = [
+            # Overview
+            (0, self.header),
+            (1, self.overview_md),
+            (1, self.key_metrics_md),
+            (0, self.key_metrics_chart),
+            (1, self.explore_predictions_md),
+            (0, self.explore_predictions_gallery),
+            (1, self.model_predictions_md),
+            (0, self.model_predictions_table),
+            (1, self.iou_eou_md),
+            (0, self.iou_eou_chart),
+            (1, self.renorm_eou_md),
+            (0, self.renorm_eou_chart),
+            (1, self.classwise_error_analysis_md),
+            (0, self.clickable_label),
+            (0, self.classwise_error_analysis_chart),
+            (1, self.confusion_matrix_md),
+            (0, self.clickable_label),
+            (0, self.confusion_matrix_chart),
+            (1, self.frequently_confused_md),
+        ]
+        if self.frequently_confused_chart is not None:
+            is_anchors_widgets.append((0, self.clickable_label))
+            is_anchors_widgets.append((0, self.frequently_confused_chart))
+        if self.speedtest_present:
+            is_anchors_widgets.append((1, self.speedtest_md_intro))
+            is_anchors_widgets.append((0, self.speedtest_intro_table))
+            if self.speedtest_multiple_batch_sizes:
+                is_anchors_widgets.append((0, self.speedtest_batch_inference_md))
+                is_anchors_widgets.append((0, self.speedtest_chart))
+
+        is_anchors_widgets.append((0, self.acknowledgement_md))
+        anchors = []
+        for is_anchor, widget in is_anchors_widgets:
+            if is_anchor:
+                anchors.append(widget.id)
+
+        sidebar = SidebarWidget(widgets=[i[1] for i in is_anchors_widgets], anchors=anchors)
+        layout = ContainerWidget(
+            widgets=[sidebar, self.explore_modal, self.diff_modal],
+            name="main_container",
+        )
+        return layout
+
+    def _init_match_data(self):
+        gt_project = Project(self.gt_project_path, OpenMode.READ)
+        pred_project = Project(self.pred_project_path, OpenMode.READ)
+        diff_map = {ds.id: ds for ds in self.eval_result.diff_dataset_infos}
+        pred_map = {ds.id: ds for ds in self.eval_result.pred_dataset_infos}
+
+        def _get_full_name(ds_id: int, ds_id_map: dict):
+            ds_info = ds_id_map[ds_id]
+            if ds_info.parent_id is None:
+                return ds_info.name
+            return f"{_get_full_name(ds_info.parent_id, ds_id_map)}/{ds_info.name}"
+
+        diff_dataset_name_map = {_get_full_name(i, diff_map): ds for i, ds in diff_map.items()}
+        pred_dataset_name_map = {_get_full_name(i, pred_map): ds for i, ds in pred_map.items()}
+
+        with self.pbar(
+            message="Visualizations: Initializing match data", total=pred_project.total_items
+        ) as p:
+            for pred_dataset in pred_project.datasets:
+                pred_dataset: Dataset
+                gt_dataset: Dataset = gt_project.datasets.get(pred_dataset.name)
+                try:
+                    diff_dataset_info = diff_dataset_name_map[pred_dataset.name]
+                    pred_dataset_info = pred_dataset_name_map[pred_dataset.name]
+                except KeyError:
+                    raise RuntimeError(
+                        f"Difference project was not created properly. Dataset {pred_dataset.name} is missing"
+                    )
+
+                try:
+                    for src_images in self.api.image.get_list_generator(
+                        pred_dataset_info.id, force_metadata_for_links=False, batch_size=100
+                    ):
+                        dst_images = self.api.image.copy_batch_optimized(
+                            pred_dataset_info.id,
+                            src_images,
+                            diff_dataset_info.id,
+                            with_annotations=False,
+                            skip_validation=True,
+                        )
+                        for diff_image_info in dst_images:
+                            item_name = diff_image_info.name
+
+                            gt_image_info = gt_dataset.get_image_info(item_name)
+                            pred_image_info = pred_dataset.get_image_info(item_name)
+                            gt_ann = gt_dataset.get_ann(item_name, gt_project.meta)
+                            pred_ann = pred_dataset.get_ann(item_name, pred_project.meta)
+
+                            self._update_match_data(
+                                gt_image_info.id,
+                                gt_image_info=gt_image_info,
+                                pred_image_info=pred_image_info,
+                                diff_image_info=diff_image_info,
+                                gt_annotation=gt_ann,
+                                pred_annotation=pred_ann,
+                            )
+
+                            assert item_name not in self.eval_result.images_map
+
+                            self.eval_result.images_map[item_name] = gt_image_info.id
+
+                            for label in pred_ann.labels:
+                                self.eval_result.images_by_class[label.obj_class.name].add(
+                                    gt_image_info.id
+                                )
+
+                        p.update(len(src_images))
+                except Exception:
+                    raise RuntimeError("Match data was not created properly")
+
+    def _get_sample_data_for_gallery(self):
+        # get sample images with annotations for visualization
+        pred_ds = random.choice(self.eval_result.pred_dataset_infos)
+        imgs = self.api.image.get_list(pred_ds.id, limit=9, force_metadata_for_links=False)
+        anns = self.api.annotation.download_batch(
+            pred_ds.id, [x.id for x in imgs], force_metadata_for_links=False
+        )
+
+        self.eval_result.sample_images = imgs
+        self.eval_result.sample_anns = anns
+
+    def _create_clickable_label(self):
+        return MarkdownWidget(name="clickable_label", title="", text=self.vis_texts.clickable_label)
+
+    def _get_filtered_project_meta(self, eval_result) -> ProjectMeta:
+        remove_classes = []
+        meta = eval_result.pred_project_meta.clone()
+        meta = meta.merge(eval_result.gt_project_meta)
+        if eval_result.classes_whitelist:
+            for obj_class in meta.obj_classes:
+                if obj_class.name not in eval_result.classes_whitelist:
+                    if obj_class.name not in [eval_result.mp.bg_cls_name, "__bg__"]:
+                        remove_classes.append(obj_class.name)
+        if remove_classes:
+            meta = meta.delete_obj_classes(remove_classes)
+        return meta
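In `_init_match_data` above, predicted and diff datasets are matched by their full nested name, resolved recursively through `parent_id`. A toy illustration of that resolution (a namedtuple stands in for Supervisely's `DatasetInfo`):

    from collections import namedtuple

    DatasetInfo = namedtuple("DatasetInfo", ["id", "name", "parent_id"])
    ds_map = {
        1: DatasetInfo(1, "season", None),
        2: DatasetInfo(2, "spring", 1),  # dataset nested under "season"
    }

    def get_full_name(ds_id, ds_id_map):
        # walk up the parent chain and join names with "/"
        info = ds_id_map[ds_id]
        if info.parent_id is None:
            return info.name
        return f"{get_full_name(info.parent_id, ds_id_map)}/{info.name}"

    assert get_full_name(2, ds_map) == "season/spring"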
supervisely/nn/benchmark/utils/__init__.py (new file)
@@ -0,0 +1,12 @@
+# isort: skip_file
+from supervisely.nn.benchmark.utils.detection.calculate_metrics import calculate_metrics
+from supervisely.nn.benchmark.utils.detection.metric_provider import MetricProvider
+from supervisely.nn.benchmark.utils.detection.sly2coco import sly2coco
+from supervisely.nn.benchmark.utils.detection.utlis import read_coco_datasets
+from supervisely.nn.benchmark.utils.detection.utlis import try_set_conf_auto
+
+from supervisely.nn.benchmark.utils.semantic_segmentation.calculate_metrics import (
+    calculate_metrics as calculate_semsegm_metrics,
+)
+from supervisely.nn.benchmark.utils.semantic_segmentation.evaluator import Evaluator
+from supervisely.nn.benchmark.utils.semantic_segmentation.loader import build_segmentation_loader
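With this `__init__.py` in place, downstream code can import the detection and segmentation helpers from the package root instead of the per-task modules. A sketch using only names re-exported above:

    from supervisely.nn.benchmark.utils import (
        MetricProvider,
        calculate_metrics,
        calculate_semsegm_metrics,
        sly2coco,
    )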
supervisely/nn/benchmark/utils/detection/calculate_metrics.py (renamed from supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py)
@@ -67,7 +67,9 @@ def calculate_metrics(
 
     iou_t = 0
     is_custom_iou_threshold = (
-        evaluation_params is not None
+        evaluation_params is not None
+        and evaluation_params.get("iou_threshold")
+        and evaluation_params["iou_threshold"] != 0.5
     )
     if is_custom_iou_threshold:
         iou_t = np.where(cocoEval.params.iouThrs == evaluation_params["iou_threshold"])[0][0]
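The new condition treats `iou_threshold: 0.5` as the default rather than a custom value, so `iou_t` stays 0 in that case. For a genuinely custom threshold, the `np.where` lookup maps it onto COCOeval's IoU grid, which in pycocotools runs 0.50:0.05:0.95. A minimal sketch of that lookup (using `np.isclose` for float safety; the SDK code above relies on an exact `==` match, which assumes the configured value sits exactly on the grid):

    import numpy as np

    iouThrs = np.linspace(0.5, 0.95, 10)  # COCOeval default grid: 0.50, 0.55, ..., 0.95
    iou_t = int(np.where(np.isclose(iouThrs, 0.75))[0][0])
    assert iou_t == 5  # 0.75 is the sixth threshold on the grid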
@@ -243,13 +245,13 @@ def get_matches(eval_img_dict: dict, eval_img_dict_cls: dict, cocoEval_cls, iou_
     """
     type cocoEval_cls: COCOeval
     """
-
+    cat_ids = cocoEval_cls.cocoGt.getCatIds()
     matches = []
     for img_id, eval_imgs in eval_img_dict.items():
 
         # get miss-classified
         eval_img_cls = eval_img_dict_cls[img_id][0]
-
+        gt_ids_orig_cls = [_["id"] for i in cat_ids for _ in cocoEval_cls._gts[img_id, i]]
 
         for eval_img in eval_imgs:
 
@@ -301,7 +303,7 @@ def get_matches(eval_img_dict: dict, eval_img_dict_cls: dict, cocoEval_cls, iou_
 
             # Correction on miss-classification
             cls_gt_id, iou = _get_missclassified_match(
-                eval_img_cls, dt_id,
+                eval_img_cls, dt_id, gt_ids_orig_cls, eval_img_cls["dtIds"], iou_t
             )
             if cls_gt_id is not None:
                 assert iou >= 0.5, iou