supervisely-6.73.237-py3-none-any.whl → supervisely-6.73.239-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of supervisely might be problematic.
- supervisely/annotation/annotation.py +2 -2
- supervisely/api/entity_annotation/tag_api.py +11 -4
- supervisely/geometry/rectangle.py +7 -8
- supervisely/nn/__init__.py +1 -0
- supervisely/nn/benchmark/__init__.py +14 -2
- supervisely/nn/benchmark/base_benchmark.py +84 -37
- supervisely/nn/benchmark/base_evaluator.py +120 -0
- supervisely/nn/benchmark/base_visualizer.py +265 -0
- supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -5
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +2 -2
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +39 -16
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +4 -4
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +12 -11
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +6 -6
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +3 -3
- supervisely/nn/benchmark/{instance_segmentation_benchmark.py → instance_segmentation/benchmark.py} +9 -3
- supervisely/nn/benchmark/instance_segmentation/evaluator.py +58 -0
- supervisely/nn/benchmark/{visualization/text_templates/instance_segmentation_text.py → instance_segmentation/text_templates.py} +53 -69
- supervisely/nn/benchmark/instance_segmentation/visualizer.py +18 -0
- supervisely/nn/benchmark/object_detection/__init__.py +0 -0
- supervisely/nn/benchmark/object_detection/base_vis_metric.py +51 -0
- supervisely/nn/benchmark/{object_detection_benchmark.py → object_detection/benchmark.py} +4 -2
- supervisely/nn/benchmark/object_detection/evaluation_params.yaml +2 -0
- supervisely/nn/benchmark/{evaluation/object_detection_evaluator.py → object_detection/evaluator.py} +67 -9
- supervisely/nn/benchmark/{evaluation/coco → object_detection}/metric_provider.py +13 -14
- supervisely/nn/benchmark/{visualization/text_templates/object_detection_text.py → object_detection/text_templates.py} +49 -41
- supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py +48 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confidence_distribution.py +20 -24
- supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +119 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confusion_matrix.py +34 -22
- supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +129 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py +21 -26
- supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py +137 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py +106 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +136 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py +53 -49
- supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +188 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +191 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +116 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +106 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py +49 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +72 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +59 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +71 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +56 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py +110 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py +151 -0
- supervisely/nn/benchmark/object_detection/visualizer.py +697 -0
- supervisely/nn/benchmark/semantic_segmentation/__init__.py +9 -0
- supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py +55 -0
- supervisely/nn/benchmark/semantic_segmentation/benchmark.py +32 -0
- supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml +0 -0
- supervisely/nn/benchmark/semantic_segmentation/evaluator.py +162 -0
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +153 -0
- supervisely/nn/benchmark/semantic_segmentation/text_templates.py +130 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/__init__.py +0 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/acknowledgement.py +15 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py +57 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +92 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py +84 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +101 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py +45 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +60 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py +107 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +112 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py +48 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py +178 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py +21 -0
- supervisely/nn/benchmark/semantic_segmentation/visualizer.py +304 -0
- supervisely/nn/benchmark/utils/__init__.py +12 -0
- supervisely/nn/benchmark/utils/detection/__init__.py +2 -0
- supervisely/nn/benchmark/{evaluation/coco → utils/detection}/calculate_metrics.py +6 -4
- supervisely/nn/benchmark/utils/detection/metric_provider.py +533 -0
- supervisely/nn/benchmark/{coco_utils → utils/detection}/sly2coco.py +4 -4
- supervisely/nn/benchmark/{coco_utils/utils.py → utils/detection/utlis.py} +11 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/__init__.py +0 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/calculate_metrics.py +35 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +804 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/loader.py +65 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/utils.py +109 -0
- supervisely/nn/benchmark/visualization/evaluation_result.py +17 -3
- supervisely/nn/benchmark/visualization/vis_click_data.py +1 -1
- supervisely/nn/benchmark/visualization/widgets/__init__.py +3 -0
- supervisely/nn/benchmark/visualization/widgets/chart/chart.py +12 -4
- supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +35 -8
- supervisely/nn/benchmark/visualization/widgets/gallery/template.html +8 -4
- supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +1 -1
- supervisely/nn/benchmark/visualization/widgets/notification/notification.py +11 -7
- supervisely/nn/benchmark/visualization/widgets/radio_group/__init__.py +0 -0
- supervisely/nn/benchmark/visualization/widgets/radio_group/radio_group.py +34 -0
- supervisely/nn/benchmark/visualization/widgets/table/table.py +9 -3
- supervisely/nn/benchmark/visualization/widgets/widget.py +4 -0
- supervisely/project/project.py +18 -6
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/METADATA +3 -1
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/RECORD +104 -82
- supervisely/nn/benchmark/coco_utils/__init__.py +0 -2
- supervisely/nn/benchmark/evaluation/__init__.py +0 -3
- supervisely/nn/benchmark/evaluation/base_evaluator.py +0 -64
- supervisely/nn/benchmark/evaluation/coco/__init__.py +0 -2
- supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +0 -88
- supervisely/nn/benchmark/utils.py +0 -13
- supervisely/nn/benchmark/visualization/inference_speed/__init__.py +0 -19
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py +0 -161
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py +0 -28
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +0 -141
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py +0 -63
- supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py +0 -23
- supervisely/nn/benchmark/visualization/vis_metric_base.py +0 -337
- supervisely/nn/benchmark/visualization/vis_metrics/__init__.py +0 -67
- supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py +0 -55
- supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py +0 -93
- supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +0 -144
- supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py +0 -115
- supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py +0 -86
- supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py +0 -119
- supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py +0 -148
- supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py +0 -109
- supervisely/nn/benchmark/visualization/vis_metrics/overview.py +0 -189
- supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py +0 -57
- supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py +0 -101
- supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py +0 -46
- supervisely/nn/benchmark/visualization/vis_metrics/precision.py +0 -56
- supervisely/nn/benchmark/visualization/vis_metrics/recall.py +0 -54
- supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py +0 -57
- supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py +0 -88
- supervisely/nn/benchmark/visualization/vis_metrics/what_is.py +0 -23
- supervisely/nn/benchmark/visualization/vis_templates.py +0 -241
- supervisely/nn/benchmark/visualization/vis_widgets.py +0 -128
- supervisely/nn/benchmark/visualization/visualizer.py +0 -729
- /supervisely/nn/benchmark/{visualization/text_templates → instance_segmentation}/__init__.py +0 -0
- /supervisely/nn/benchmark/{evaluation/coco → instance_segmentation}/evaluation_params.yaml +0 -0
- /supervisely/nn/benchmark/{evaluation/coco → utils/detection}/metrics.py +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/LICENSE +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/WHEEL +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/top_level.txt +0 -0
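
The bulk of this release is a restructuring of `supervisely/nn/benchmark`: the flat `evaluation/`, `coco_utils/`, and `visualization/vis_metrics/` modules are split into per-task packages (`object_detection/`, `instance_segmentation/`, `semantic_segmentation/`), each with its own `benchmark.py`, `evaluator.py`, `visualizer.py`, and `vis_metrics/`. For downstream code that imported from the old paths, the renames above imply a migration along these lines (a sketch based on the renamed files; the class names and exact re-exports are assumptions until verified against 6.73.239):

```python
# Before (6.73.237) -- old flat layout, per the renames listed above:
# from supervisely.nn.benchmark.object_detection_benchmark import ObjectDetectionBenchmark
# from supervisely.nn.benchmark.evaluation.object_detection_evaluator import ObjectDetectionEvaluator

# After (6.73.239) -- per-task packages:
from supervisely.nn.benchmark.object_detection.benchmark import ObjectDetectionBenchmark
from supervisely.nn.benchmark.object_detection.evaluator import ObjectDetectionEvaluator
```

The hunks below reproduce the files this release deletes outright: the old inference-speed widgets, `vis_metric_base.py`, and the old `vis_metrics` registry.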
```diff
--- a/supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
-from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
-if TYPE_CHECKING:
-    from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
-class SpeedtestIntro(MetricVis):
-
-    def __init__(self, loader: Visualizer) -> None:
-        super().__init__(loader)
-        speedtest = self._loader.speedtest["speedtest"][0]
-        self.schema = Schema(
-            self._loader.inference_speed_text,
-            markdown_speedtest_intro=Widget.Markdown(
-                title="Inference Speed",
-                is_header=True,
-                formats=[
-                    speedtest["device"],
-                    self._loader.hardware,
-                    speedtest["runtime"]
-                ],
-            ),
-        )
```
```diff
--- a/supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py
+++ /dev/null
@@ -1,141 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
-from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
-if TYPE_CHECKING:
-    from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
-class SpeedtestOverview(MetricVis):
-
-    def __init__(self, loader: Visualizer) -> None:
-        super().__init__(loader)
-        self.clickable = False
-        num_iterations = self._loader.speedtest["speedtest"][0]["num_iterations"]
-        self.schema = Schema(
-            self._loader.inference_speed_text,
-            markdown_speedtest_overview=Widget.Markdown(title="Info", formats=[num_iterations]),
-            table=Widget.Table(),
-        )
-        self._row_ids = None
-
-    def get_table(self, widget: Widget.Table) -> dict:
-        res = {}
-
-        columns = [" ", "Infrence time", "FPS"]
-        temp_res = {}
-        max_fps = 0
-        for test in self._loader.speedtest["speedtest"]:
-            batch_size = test["batch_size"]
-
-            ms = round(test["benchmark"]["total"], 2)
-            fps = round(1000 / test["benchmark"]["total"] * batch_size)
-            row = [batch_size, ms, fps]
-            temp_res[batch_size] = row
-            max_fps = max(max_fps, fps)
-
-        res["content"] = []
-        # sort by batch size
-        temp_res = dict(sorted(temp_res.items()))
-        for row in temp_res.values():
-            dct = {
-                "row": row,
-                "id": row[0],
-                "items": row,
-            }
-            res["content"].append(dct)
-
-        columns_options = [
-            {"customCell": True, "disableSort": True},
-            {"subtitle": "ms", "tooltip": "Milliseconds for batch images", "postfix": "ms"},
-            {
-                "subtitle": "imgs/sec",
-                "tooltip": "Frames (images) per second",
-                "postfix": "fps",
-                "maxValue": max_fps,
-            },
-        ]
-
-        res["columns"] = columns
-        res["columnsOptions"] = columns_options
-
-        widget.main_column = "Batch size"
-        widget.fixed_columns = 1
-        widget.show_header_controls = False
-        return res
-
-    def get_table_click_data(self, widget: Widget.Table) -> dict:
-        return {}
-
-
-## ========================backup (for public benchmark)==========================
-# class SpeedtestOverview(MetricVis):
-
-#     def __init__(self, loader: Visualizer) -> None:
-#         super().__init__(loader)
-#         self.clickable = False
-#         num_iterations = self._loader.speedtest["speedtest"][0]["num_iterations"]
-#         self.schema = Schema(
-#             self._loader.inference_speed_text,
-#             markdown_speedtest_overview=Widget.Markdown(
-#                 title="Overview", formats=[num_iterations, self._loader.hardware]
-#             ),
-#             table=Widget.Table(),
-#         )
-#         self._row_ids = None
-
-#     def get_table(self, widget: Widget.Table) -> dict:
-#         res = {}
-
-#         columns = [
-#             "Runtime",
-#             "Batch size 1",
-#             "Batch size 8",
-#             "Batch size 16",
-#             "Batch size 1",
-#             "Batch size 8",
-#             "Batch size 16",
-#         ]
-#         temp_res = {}
-#         for test in self._loader.speedtest["speedtest"]:
-#             device = "GPU" if "cuda" in test["device"] else "CPU"
-#             runtime = test["runtime"]
-#             batch_size = test["batch_size"]
-#             row_name = f"{device} {runtime}"
-
-#             row = temp_res.setdefault(row_name, {})
-#             row["Runtime"] = row_name
-#             row[f"Batch size {batch_size} (ms)"] = round(test["benchmark"]["total"], 2)
-#             row[f"Batch size {batch_size} (FPS)"] = round(
-#                 1000 / test["benchmark"]["total"] * batch_size
-#             )
-
-#         res["content"] = []
-#         for row in temp_res.values():
-#             dct = {
-#                 "row": row,
-#                 "id": row["Runtime"],
-#                 "items": list(row.values()),
-#             }
-#             res["content"].append(dct)
-
-#         columns_options = [
-#             {"maxWidth": "225px"},
-#             {"subtitle": "ms", "tooltip": "Milliseconds for 1 image"},
-#             {"subtitle": "ms", "tooltip": "Milliseconds for 8 images"},
-#             {"subtitle": "ms", "tooltip": "Milliseconds for 16 images"},
-#             {"subtitle": "FPS", "tooltip": "Frames (images) per second"},
-#             {"subtitle": "FPS", "tooltip": "Frames (images) per second"},
-#             {"subtitle": "FPS", "tooltip": "Frames (images) per second"},
-#         ]
-
-#         res["columns"] = columns
-#         res["columnsOptions"] = columns_options
-
-#         return res
-
-#     def get_table_click_data(self, widget: Widget.Table) -> dict:
-#         return {}
```
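
A quick sanity check on the throughput formula in `get_table` above: `benchmark["total"]` is the average wall time in milliseconds for one batch, so FPS scales its inverse by the batch size. A worked example with made-up numbers:

```python
total_ms = 40.0   # avg time per batch, in ms (illustrative, not from the diff)
batch_size = 8
ms = round(total_ms, 2)                    # value shown in the "Infrence time" column
fps = round(1000 / total_ms * batch_size)  # 1000/40 batches/sec * 8 imgs = 200 imgs/sec
assert (ms, fps) == (40.0, 200)
```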
```diff
--- a/supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
-from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
-if TYPE_CHECKING:
-    from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
-class SpeedtestRealTime(MetricVis):
-
-    def __init__(self, loader: Visualizer) -> None:
-        super().__init__(loader)
-        self.switchable: bool = True
-        charts = {}
-        for batch_size in [1, 8, 16]:
-            for measure in ["ms", "fps"]:
-                key = f"{batch_size}_{measure}"
-                charts[f"chart_{key}"] = Widget.Chart(switch_key=key)
-        self.schema = Schema(
-            self._loader.inference_speed_text,
-            markdown_real_time_inference=Widget.Markdown(title="Real-time inference"),
-            **charts,
-        )
-
-    def get_figure(self, widget: Widget.Chart):  # -> Optional[go.Figure]
-        import plotly.graph_objects as go  # pylint: disable=import-error
-
-        colors = iter(["#17becf", "#e377c2", "#bcbd22", "#ff7f0e", "#9467bd", "#2ca02c"])
-
-        batch_size, measure = widget.switch_key.split("_")
-
-        fig = go.Figure()
-        for test in self._loader.speedtest["speedtest"]:
-            device = "GPU" if "cuda" in test["device"] else "CPU"
-            runtime = test["runtime"]
-            runtime_and_device = f"{device} {runtime}"
-            bs = test["batch_size"]
-            if batch_size != bs:
-                continue
-
-            if measure == "ms":
-                total = test["benchmark"]["total"]
-            else:
-                total = round(1000 / test["benchmark"]["total"] * bs)
-
-            fig.add_trace(
-                go.Bar(
-                    y=total,
-                    x=runtime_and_device,
-                    marker=dict(color=next(colors)),
-                )
-            )
-
-
-        y_title = "Time (ms)" if measure == "ms" else "Images per second (FPS)"
-        fig.update_xaxes(title_text="Runtime")
-        fig.update_yaxes(title_text=y_title)
-        fig.update_layout(height=400, width=800)
-
-        return fig
```
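
One detail worth flagging in the deleted `get_figure`: `widget.switch_key.split("_")` returns strings, so `batch_size != bs` compares a `str` against whatever type the speedtest JSON stores; if `test["batch_size"]` is an `int` (which the arithmetic elsewhere suggests), the guard skips every trace. A minimal illustration of the pitfall:

```python
switch_key = "8_fps"
batch_size, measure = switch_key.split("_")  # batch_size == "8" (a str)
bs = 8                                       # batch size as an int from the JSON
print(batch_size != bs)       # True  -> trace skipped even though sizes match
print(int(batch_size) != bs)  # False -> the comparison presumably intended
```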
```diff
--- a/supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py
+++ /dev/null
@@ -1,23 +0,0 @@
-markdown_speedtest_intro = """## Inference Speed
-
-This is a speed test benchmark for this model. The model was tested with the following configuration:
-
-- **Device**: {}
-- **Hardware**: {}
-- **Runtime**: {}
-"""
-
-markdown_speedtest_overview = """
-The table below shows the speed test results. For each test, the time taken to process one batch of images is shown, as well as the model's throughput (i.e, the number of images processed per second, or FPS). Results are averaged across **{}** iterations.
-"""
-
-markdown_real_time_inference = """## Real-time Inference
-
-This chart compares different runtimes and devices (CPU or GPU)."""
-
-# We additionally divide **predict** procedure into three stages: pre-process, inference, and post-process. Each bar in this chart consists of these three stages. For example, in the chart you can find how long the post-process phase lasts in a CPU device with an ONNXRuntime environment."""
-
-
-markdown_batch_inference = """
-This chart shows how the model's speed changes with different batch sizes . As the batch size increases, you can observe an increase in FPS (images per second).
-"""
```
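
These templates are plain `str.format` strings; the `{}` slots are filled from each `Widget.Markdown(formats=[...])` declaration (see `MetricVis.get_md_content` in the next hunk). For instance, `SpeedtestIntro` above passes `[speedtest["device"], self._loader.hardware, speedtest["runtime"]]`, so rendering works like this sketch (the argument values are illustrative):

```python
markdown_speedtest_intro = """## Inference Speed

This is a speed test benchmark for this model. The model was tested with the following configuration:

- **Device**: {}
- **Hardware**: {}
- **Runtime**: {}
"""

# Equivalent of get_md_content: getattr(texts, widget.name).format(*widget.formats)
print(markdown_speedtest_intro.format("cuda:0", "NVIDIA GeForce RTX 4090", "PyTorch"))
```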
```diff
--- a/supervisely/nn/benchmark/visualization/vis_metric_base.py
+++ /dev/null
@@ -1,337 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING, List, Optional
-
-if TYPE_CHECKING:
-    from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-from jinja2 import Template
-
-from supervisely._utils import camel_to_snake
-from supervisely.nn.benchmark.cv_tasks import CVTask
-from supervisely.nn.benchmark.visualization.vis_templates import (
-    template_chart_str,
-    template_gallery_str,
-    template_markdown_str,
-    template_notification_str,
-    template_radiogroup_str,
-    template_table_str,
-)
-from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-from supervisely.project.project_meta import ProjectMeta
-
-
-class MetricVis:
-
-    def __init__(self, loader: Visualizer) -> None:
-
-        self.cv_tasks: List[CVTask] = CVTask.values()
-        self.clickable: bool = False
-        self.has_diffs_view: bool = False
-        self.switchable: bool = False
-        self.schema: Schema = None
-        self.empty = False
-        self._is_overview = False
-
-        self._loader = loader
-        self._template_markdown = Template(template_markdown_str)
-        self._template_chart = Template(template_chart_str)
-        self._template_radiogroup = Template(template_radiogroup_str)
-        self._template_gallery = Template(template_gallery_str)
-        self._template_table = Template(template_table_str)
-        self._template_notification = Template(template_notification_str)
-        self._keypair_sep = "-"
-
-    @property
-    def radiogroup_id(self) -> Optional[str]:
-        if self.switchable:
-            return f"radiogroup_" + self.name
-
-    @property
-    def template_sidebar_str(self) -> str:
-        res = ""
-        for widget in self.schema:  # pylint: disable=not-an-iterable
-            if isinstance(widget, Widget.Markdown):
-                if widget.title is not None and widget.is_header:
-                    res += f"""\n <div>\n <el-button type="text" @click="data.scrollIntoView='{widget.id}'" """
-                    res += (
-                        """:style="{fontWeight: data.scrollIntoView==='"""
-                        + widget.id
-                        + """' ? 'bold' : 'normal'}"""
-                    )
-                    res += f""" ">{widget.title}</el-button>\n </div>"""
-        return res
-
-    @property
-    def template_main_str(self) -> str:
-        res = ""
-
-        _is_before_chart = True
-
-        def _add_radio_buttons():
-            res = ""
-            for widget in self.schema:  # pylint: disable=not-an-iterable
-                if isinstance(widget, Widget.Chart):
-                    basename = f"{widget.name}_{self.name}"
-                    res += "\n {{ " + f"el_radio_{basename}_html" + " }}"
-            return res
-
-        is_radiobuttons_added = False
-
-        for widget in self.schema:  # pylint: disable=not-an-iterable
-            if isinstance(widget, Widget.Chart):
-                _is_before_chart = False
-
-            if (
-                isinstance(widget, (Widget.Markdown, Widget.Notification, Widget.Collapse))
-                and _is_before_chart
-            ):
-                res += "\n {{ " + f"{widget.name}_html" + " }}"
-                continue
-
-            if isinstance(widget, (Widget.Chart, Widget.Gallery, Widget.Table)):
-                basename = f"{widget.name}_{self.name}"
-                if self.switchable and not is_radiobuttons_added:
-                    res += _add_radio_buttons()
-                    is_radiobuttons_added = True
-                res += "\n {{ " + f"{basename}_html" + " }}"
-                if self.clickable:
-                    res += "\n {{ " + f"{basename}_clickdata_html" + " }}"
-                continue
-
-            if (
-                isinstance(widget, (Widget.Markdown, Widget.Notification, Widget.Collapse))
-                and not _is_before_chart
-            ):
-                res += "\n {{ " + f"{widget.name}_html" + " }}"
-                continue
-
-        return res
-
-    def get_html_snippets(self) -> dict:
-        res = {}
-        for widget in self.schema:  # pylint: disable=not-an-iterable
-            if isinstance(widget, Widget.Markdown):
-                res[f"{widget.name}_html"] = self._template_markdown.render(
-                    {
-                        "widget_id": widget.id,
-                        "data_source": f"/data/{widget.name}.md",
-                        "command": "command",
-                        "data": "data",
-                        "is_overview": widget.title == "Overview",
-                    }
-                )
-
-            if isinstance(widget, Widget.Collapse):
-                subres = {}
-                for subwidget in widget.schema:
-                    if isinstance(subwidget, Widget.Markdown):
-                        subres[f"{subwidget.name}_html"] = self._template_markdown.render(
-                            {
-                                "widget_id": subwidget.id,
-                                "data_source": f"/data/{subwidget.name}.md",
-                                "command": "command",
-                                "data": "data",
-                            }
-                        )
-                res[f"{widget.name}_html"] = widget.template_schema.render(**subres)
-                continue
-
-            if isinstance(widget, Widget.Notification):
-                res[f"{widget.name}_html"] = self._template_notification.render(
-                    {
-                        "widget_id": widget.id,
-                        "data": "data",
-                        "title": widget.title.format(*widget.formats_title),
-                        "description": widget.description.format(*widget.formats_desc),
-                    }
-                )
-
-            if isinstance(widget, Widget.Chart):
-                basename = f"{widget.name}_{self.name}"
-                if self.switchable:
-                    res[f"el_radio_{basename}_html"] = self._template_radiogroup.render(
-                        {
-                            "radio_group": self.radiogroup_id,
-                            "switch_key": widget.switch_key,
-                        }
-                    )
-                chart_click_path = f"/data/{basename}_click_data.json" if self.clickable else None
-                chart_modal_data_source = f"/data/modal_general.json" if self.clickable else None
-                res[f"{basename}_html"] = self._template_chart.render(
-                    {
-                        "widget_id": widget.id,
-                        "init_data_source": f"/data/{basename}.json",
-                        "chart_click_data_source": chart_click_path,
-                        "command": "command",
-                        "data": "data",
-                        "cls_name": self.name,
-                        "key_separator": self._keypair_sep,
-                        "switchable": self.switchable,
-                        "radio_group": self.radiogroup_id,
-                        "switch_key": widget.switch_key,
-                        "chart_modal_data_source": chart_modal_data_source,
-                    }
-                )
-            if isinstance(widget, Widget.Gallery):
-                basename = f"{widget.name}_{self.name}"
-                if widget.is_table_gallery:
-                    for w in self.schema:  # pylint: disable=not-an-iterable
-                        if isinstance(w, Widget.Table):
-                            w.gallery_id = widget.id
-
-                gallery_click_data_source = (
-                    f"/data/{basename}_click_data.json" if self.clickable else None
-                )
-                gallery_modal_data_source = (
-                    f"/data/{basename}_modal_data.json" if self.clickable else None
-                )
-                gallery_diff_data_source = (
-                    f"/data/{basename}_diff_data.json" if self.has_diffs_view else None
-                )
-                res[f"{basename}_html"] = self._template_gallery.render(
-                    {
-                        "widget_id": widget.id,
-                        "init_data_source": f"/data/{basename}.json",
-                        "command": "command",
-                        "data": "data",
-                        "is_table_gallery": widget.is_table_gallery,
-                        "gallery_click_data_source": gallery_click_data_source,
-                        "gallery_diff_data_source": gallery_diff_data_source,
-                        "gallery_modal_data_source": gallery_modal_data_source,
-                    }
-                )
-
-            if isinstance(widget, Widget.Table):
-                basename = f"{widget.name}_{self.name}"
-                res[f"{basename}_html"] = self._template_table.render(
-                    {
-                        "widget_id": widget.id,
-                        "init_data_source": f"/data/{basename}.json",
-                        "command": "command",
-                        "data": "data",
-                        "table_click_data": f"/data/{widget.name}_{self.name}_click_data.json",
-                        "table_gallery_id": f"modal_general",
-                        "clickable": self.clickable,
-                        "mainColumn": widget.main_column,
-                        "fixColumns": widget.fixed_columns,
-                        "showHeaderControls": widget.show_header_controls,
-                        "width": f"width:{widget.width}" if widget.width else "",
-                    }
-                )
-
-        return res
-
-    @property
-    def name(self) -> str:
-        return camel_to_snake(self.__class__.__name__)
-
-    @property
-    def f1_optimal_conf(self) -> Optional[float]:
-        return self._loader.f1_optimal_conf
-
-    def get_figure(self, widget: Widget.Chart):  # -> Optional[go.Figure]:
-        pass
-
-    def get_click_data(self, widget: Widget.Chart) -> Optional[dict]:
-        if not self.clickable:
-            return
-
-        res = {}
-
-        res["layoutTemplate"] = [None, None, None]
-        res["clickData"] = {}
-        for key, v in self._loader.click_data.objects_by_class.items():
-            res["clickData"][key] = {}
-            res["clickData"][key]["imagesIds"] = []
-
-            # tmp = defaultdict(list)
-            img_ids = set()
-            obj_ids = set()
-
-            res["clickData"][key][
-                "title"
-            ] = f"{key} class: {len(v)} object{'s' if len(v) > 1 else ''}"
-
-            for x in v:
-                img_ids.add(x["dt_img_id"])
-                obj_ids.add(x["dt_obj_id"])
-
-            res["clickData"][key]["imagesIds"] = list(img_ids)
-            res["clickData"][key]["filters"] = [
-                {"type": "tag", "tagId": "confidence", "value": [self.f1_optimal_conf, 1]},
-                {"type": "tag", "tagId": "outcome", "value": "TP"},
-                {"type": "specific_objects", "tagId": None, "value": list(obj_ids)},
-            ]
-
-        return res
-
-    def get_modal_data(self, widget: Widget.Chart) -> Optional[dict]:
-        res = {}
-        api = self._loader._api
-        gt_project_id = self._loader.gt_project_info.id
-        dt_project_id = self._loader.dt_project_info.id
-        diff_project_id = self._loader.diff_project_info.id
-        gt_dataset = api.dataset.get_list(gt_project_id)[0]
-        dt_dataset = api.dataset.get_list(dt_project_id)[0]
-        diff_dataset = api.dataset.get_list(diff_project_id)[0]
-        gt_image_infos = api.image.get_list(dataset_id=gt_dataset.id)[:3]
-        pred_image_infos = api.image.get_list(dataset_id=dt_dataset.id)[:3]
-        diff_image_infos = api.image.get_list(dataset_id=diff_dataset.id)[:3]
-        project_metas = [
-            ProjectMeta.from_json(data=api.project.get_meta(id=x))
-            for x in [gt_project_id, dt_project_id, diff_project_id]
-        ]
-        for gt_image, pred_image, diff_image in zip(
-            gt_image_infos, pred_image_infos, diff_image_infos
-        ):
-            image_infos = [gt_image, pred_image, diff_image]
-            ann_infos = [api.annotation.download(x.id) for x in image_infos]
-
-            for idx, (image_info, ann_info, project_meta) in enumerate(
-                zip(image_infos, ann_infos, project_metas)
-            ):
-                image_name = image_info.name
-                image_url = image_info.preview_url
-                is_ignore = True if idx in [0, 1] else False
-                widget.gallery.append(
-                    title=image_name,
-                    image_url=image_url,
-                    annotation_info=ann_info,
-                    column_index=idx,
-                    project_meta=project_meta,
-                    ignore_tags_filtering=is_ignore,
-                )
-
-        res.update(widget.gallery.get_json_state())
-        res.update(widget.gallery.get_json_data()["content"])
-        res["layoutData"] = res.pop("annotations")
-        res["projectMeta"] = project_metas[0].to_json()
-
-        res.pop("layout")
-        res.pop("layoutData")
-
-        return res
-
-    def get_table(self, widget: Widget.Table) -> Optional[dict]:
-        pass
-
-    def get_gallery(self, widget: Widget.Gallery) -> Optional[dict]:
-        pass
-
-    def get_gallery_click_data(self, widget: Widget.Gallery) -> Optional[dict]:
-        pass
-
-    def get_diff_gallery_data(self, widget: Widget.Gallery) -> Optional[dict]:
-        pass
-
-    def get_md_content(self, widget: Widget.Markdown):
-        if hasattr(self._loader.vis_texts, widget.name):
-            return getattr(self._loader.vis_texts, widget.name).format(*widget.formats)
-        elif hasattr(self._loader.inference_speed_text, widget.name):
-            return getattr(self._loader.inference_speed_text, widget.name).format(*widget.formats)
-        else:
-            raise AttributeError(f"Not found texts template for {widget.name}")
-
-    def initialize_formats(self, loader: Visualizer, widget: Widget):
-        pass
```
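
The `name` property above is what ties the templating together: every data file and placeholder is keyed by `f"{widget.name}_{self.name}"`, where `self.name` is the metric class name converted to snake_case. A self-contained sketch of the convention (the one-line `camel_to_snake` here is a stand-in for `supervisely._utils.camel_to_snake`, not its actual implementation):

```python
import re

def camel_to_snake(s: str) -> str:
    # toy stand-in for supervisely._utils.camel_to_snake
    return re.sub(r"(?<!^)(?=[A-Z])", "_", s).lower()

cls_name = camel_to_snake("SpeedtestOverview")  # "speedtest_overview"
basename = f"table_{cls_name}"                  # widget.name == "table"
print(f"/data/{basename}.json")                 # init_data_source used above
print(f"/data/{basename}_click_data.json")      # table_click_data used above
```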
```diff
--- a/supervisely/nn/benchmark/visualization/vis_metrics/__init__.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from supervisely.nn.benchmark.visualization.vis_metrics.confidence_distribution import (
-    ConfidenceDistribution,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.confidence_score import (
-    ConfidenceScore,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.confusion_matrix import (
-    ConfusionMatrix,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.explorer_grid import (
-    ExplorerGrid,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.f1_score_at_different_iou import (
-    F1ScoreAtDifferentIOU,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.frequently_confused import (
-    FrequentlyConfused,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.iou_distribution import (
-    IOUDistribution,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.model_predictions import (
-    ModelPredictions,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.outcome_counts import (
-    OutcomeCounts,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.outcome_counts_per_class import (
-    PerClassOutcomeCounts,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.overview import Overview
-from supervisely.nn.benchmark.visualization.vis_metrics.percision_avg_per_class import (
-    PerClassAvgPrecision,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.pr_curve import PRCurve
-from supervisely.nn.benchmark.visualization.vis_metrics.pr_curve_by_class import (
-    PRCurveByClass,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.precision import Precision
-from supervisely.nn.benchmark.visualization.vis_metrics.recall import Recall
-from supervisely.nn.benchmark.visualization.vis_metrics.recall_vs_precision import (
-    RecallVsPrecision,
-)
-from supervisely.nn.benchmark.visualization.vis_metrics.reliability_diagram import (
-    ReliabilityDiagram,
-)
-
-ALL_METRICS = (
-    Overview,
-    ExplorerGrid,
-    ModelPredictions,
-    OutcomeCounts,
-    Recall,
-    Precision,
-    RecallVsPrecision,
-    PRCurve,
-    PRCurveByClass,
-    ConfusionMatrix,
-    FrequentlyConfused,
-    IOUDistribution,
-    ReliabilityDiagram,
-    ConfidenceScore,
-    F1ScoreAtDifferentIOU,
-    ConfidenceDistribution,
-    PerClassAvgPrecision,
-    PerClassOutcomeCounts,
-)
```
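
`ALL_METRICS` was consumed as a plain registry by the (also deleted) `visualizer.py` (-729 lines above): each class was instantiated with the visualizer as `loader` and its rendered snippets merged into the report. A self-contained sketch of that pattern with stub classes (the actual wiring lived in the removed `Visualizer`, so this is an assumption modeled on the `MetricVis` API shown above):

```python
class StubLoader:
    """Stands in for the deleted Visualizer."""

class StubOverview:
    """Stands in for a MetricVis subclass such as Overview."""

    def __init__(self, loader) -> None:
        self._loader = loader

    def get_html_snippets(self) -> dict:
        return {"markdown_overview_html": "<div>...</div>"}

ALL_METRICS = (StubOverview,)

loader = StubLoader()
snippets = {}
for metric_cls in ALL_METRICS:
    snippets.update(metric_cls(loader).get_html_snippets())
# `snippets` maps placeholder names to rendered HTML fragments for the report template
print(snippets)
```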