supervisely 6.73.254__py3-none-any.whl → 6.73.256__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of supervisely might be problematic.
- supervisely/api/api.py +16 -8
- supervisely/api/file_api.py +16 -5
- supervisely/api/task_api.py +4 -2
- supervisely/app/widgets/field/field.py +10 -7
- supervisely/app/widgets/grid_gallery_v2/grid_gallery_v2.py +3 -1
- supervisely/io/network_exceptions.py +14 -2
- supervisely/nn/benchmark/base_benchmark.py +33 -35
- supervisely/nn/benchmark/base_evaluator.py +27 -1
- supervisely/nn/benchmark/base_visualizer.py +8 -11
- supervisely/nn/benchmark/comparison/base_visualizer.py +147 -0
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/__init__.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py +5 -7
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +4 -6
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/{explore_predicttions.py → explore_predictions.py} +17 -17
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +3 -5
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +7 -9
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +11 -22
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +3 -5
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +22 -20
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +12 -6
- supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py +31 -76
- supervisely/nn/benchmark/comparison/model_comparison.py +112 -19
- supervisely/nn/benchmark/comparison/semantic_segmentation/__init__.py +0 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/text_templates.py +128 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/__init__.py +21 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/classwise_error_analysis.py +68 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/explore_predictions.py +141 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/frequently_confused.py +71 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/iou_eou.py +68 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/overview.py +223 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/renormalized_error_ou.py +57 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/speedtest.py +314 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/visualizer.py +159 -0
- supervisely/nn/benchmark/instance_segmentation/evaluator.py +1 -1
- supervisely/nn/benchmark/object_detection/evaluator.py +1 -1
- supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +1 -3
- supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +3 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +3 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +1 -1
- supervisely/nn/benchmark/object_detection/visualizer.py +5 -10
- supervisely/nn/benchmark/semantic_segmentation/evaluator.py +12 -2
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +8 -9
- supervisely/nn/benchmark/semantic_segmentation/text_templates.py +2 -2
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +31 -1
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +1 -3
- supervisely/nn/benchmark/semantic_segmentation/visualizer.py +7 -6
- supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +3 -21
- supervisely/nn/benchmark/visualization/renderer.py +25 -10
- supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +1 -0
- supervisely/nn/inference/inference.py +1 -0
- supervisely/nn/training/gui/gui.py +32 -10
- supervisely/nn/training/gui/training_artifacts.py +145 -0
- supervisely/nn/training/gui/training_process.py +3 -19
- supervisely/nn/training/train_app.py +179 -70
- {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/METADATA +1 -1
- {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/RECORD +60 -48
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/vis_metric.py +0 -19
- {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/LICENSE +0 -0
- {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/WHEEL +0 -0
- {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/top_level.txt +0 -0
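
Three refactors recur across the vis-metric hunks below: the comparison widgets now subclass BaseVisMetrics from supervisely.nn.benchmark.base_visualizer (the old vis_metric.py module with BaseVisMetric is deleted, per the file list above), the F1-optimal confidence threshold is read through the metric provider (eval_result.mp.f1_optimal_conf instead of eval_result.f1_optimal_conf), and image/annotation listing calls gain force_metadata_for_links=False. A minimal sketch of that last pattern, with a placeholder dataset id; the keyword arguments appear verbatim in the hunks, the surrounding setup is assumed:

    import supervisely as sly

    api = sly.Api.from_env()  # expects SERVER_ADDRESS and API_TOKEN in the environment
    dataset_id = 12345  # placeholder

    # Skipping forced metadata resolution avoids an extra round-trip for images
    # stored by remote link; metadata fields that need it may come back unfilled.
    image_infos = api.image.get_list(dataset_id, force_metadata_for_links=False)
    image_ids = [info.id for info in image_infos]
    anns = api.annotation.download_batch(dataset_id, image_ids, force_metadata_for_links=False)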
(In the reconstructed hunks below, … marks code cut off by the diff viewer.)

supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/{explore_predicttions.py → explore_predictions.py} RENAMED

@@ -3,14 +3,12 @@ from typing import List, Tuple
 from supervisely.annotation.annotation import Annotation
 from supervisely.api.image_api import ImageInfo
 from supervisely.api.module_api import ApiField
-from supervisely.nn.benchmark.…
-    BaseVisMetric,
-)
+from supervisely.nn.benchmark.base_visualizer import BaseVisMetrics
 from supervisely.nn.benchmark.visualization.widgets import GalleryWidget, MarkdownWidget
 from supervisely.project.project_meta import ProjectMeta
 
 
-class ExplorePredictions(BaseVisMetric):
+class ExplorePredictions(BaseVisMetrics):
 
     MARKDOWN_DIFFERENCE = "markdown_explore_difference"
     GALLERY_DIFFERENCE = "explore_difference_gallery"
@@ -33,8 +31,9 @@ class ExplorePredictions(BaseVisMetric):
         gallery.add_image_left_header("Click to explore more")
         gallery.set_project_meta(self.eval_results[0].gt_project_meta)
         gallery.set_images(*data)
-
-        gallery.…
+        click_data = self.get_click_data_explore_all()
+        gallery.set_click_data(self.explore_modal_table.id, click_data)
+        gallery.set_show_all_data(self.explore_modal_table.id, click_data)
         gallery._gallery._update_filters()
 
         return gallery
@@ -51,18 +50,18 @@ class ExplorePredictions(BaseVisMetric):
         for idx, eval_res in enumerate(self.eval_results):
             if idx == 0:
                 dataset_info = eval_res.gt_dataset_infos[0]
-                image_infos = api.image.get_list(dataset_info.id, limit=5)
+                image_infos = api.image.get_list(dataset_info.id, limit=5, force_metadata_for_links=False)
                 ds_name = dataset_info.name
                 images_ids = [image_info.id for image_info in image_infos]
                 names = [image_info.name for image_info in image_infos]
                 images.append(image_infos)
-                anns = api.annotation.download_batch(dataset_info.id, images_ids)
+                anns = api.annotation.download_batch(dataset_info.id, images_ids, force_metadata_for_links=False)
                 annotations.append(anns)
                 skip_tags_filtering.append(True)
-                metas.append(eval_res.…
+                metas.append(eval_res.pred_project_meta)
             assert ds_name is not None, "Failed to get GT dataset name for gallery"
 
-            dataset_info = api.dataset.get_info_by_name(eval_res.…
+            dataset_info = api.dataset.get_info_by_name(eval_res.pred_project_id, ds_name)
 
             assert names is not None, "Failed to get GT image names for gallery"
             image_infos = eval_res.api.image.get_list(
@@ -70,13 +69,14 @@ class ExplorePredictions(BaseVisMetric):
                 filters=[
                     {ApiField.FIELD: ApiField.NAME, ApiField.OPERATOR: "in", ApiField.VALUE: names}
                 ],
+                force_metadata_for_links=False,
             )
             images_ids = [image_info.id for image_info in image_infos]
             images.append(image_infos)
-            anns = eval_res.api.annotation.download_batch(dataset_info.id, images_ids)
+            anns = eval_res.api.annotation.download_batch(dataset_info.id, images_ids, force_metadata_for_links=False)
             annotations.append(anns)
             skip_tags_filtering.append(False)
-            min_conf = min(min_conf, eval_res.f1_optimal_conf)
+            min_conf = min(min_conf, eval_res.mp.f1_optimal_conf)
 
         images = list(i for x in zip(*images) for i in x)
         annotations = list(i for x in zip(*annotations) for i in x)
@@ -91,7 +91,7 @@ class ExplorePredictions(BaseVisMetric):
         res["layoutTemplate"] = [{"skipObjectTagsFiltering": True, "columnTitle": "Ground Truth"}]
         # for i in range(len(self.eval_results)):
         for idx, eval_res in enumerate(self.eval_results, 1):
-            res["layoutTemplate"].append({"columnTitle": f"[{idx}] {eval_res.…
+            res["layoutTemplate"].append({"columnTitle": f"[{idx}] {eval_res.name}"})
 
         click_data = res.setdefault("clickData", {})
         explore = click_data.setdefault("explore", {})
@@ -109,25 +109,25 @@ class ExplorePredictions(BaseVisMetric):
             current_images_ids = []
             current_images_names = []
             for ds in dataset_infos:
-                image_infos = …
+                image_infos = api.image.get_list(ds.id, force_metadata_for_links=False)
                 image_infos = sorted(image_infos, key=lambda x: x.name)
                 current_images_names.extend([image_info.name for image_info in image_infos])
                 current_images_ids.extend([image_info.id for image_info in image_infos])
             images_ids.append(current_images_ids)
             names = current_images_names
 
-            dataset_infos = api.dataset.get_list(eval_res.…
+            dataset_infos = api.dataset.get_list(eval_res.pred_project_id)
             dataset_infos = [ds for ds in dataset_infos if ds.name in ds_names]
             dataset_infos = sorted(dataset_infos, key=lambda x: ds_names.index(x.name))
             current_images_infos = []
             for ds in dataset_infos:
-                image_infos = …
+                image_infos = api.image.get_list(ds.id, force_metadata_for_links=False)
                 image_infos = [image_info for image_info in image_infos if image_info.name in names]
                 current_images_infos.extend(image_infos)
             current_images_infos = sorted(current_images_infos, key=lambda x: names.index(x.name))
             images_ids.append([image_info.id for image_info in current_images_infos])
 
-            min_conf = min(min_conf, eval_res.f1_optimal_conf)
+            min_conf = min(min_conf, eval_res.mp.f1_optimal_conf)
 
         explore["imagesIds"] = list(i for x in zip(*images_ids) for i in x)
         explore["filters"] = [{"type": "tag", "tagId": "confidence", "value": [min_conf, 1]}]
supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py CHANGED

@@ -1,8 +1,6 @@
 import numpy as np
 
-from supervisely.nn.benchmark.…
-    BaseVisMetric,
-)
+from supervisely.nn.benchmark.base_visualizer import BaseVisMetrics
 from supervisely.nn.benchmark.cv_tasks import CVTask
 from supervisely.nn.benchmark.visualization.widgets import (
     ChartWidget,
@@ -12,7 +10,7 @@ from supervisely.nn.benchmark.visualization.widgets import (
 )
 
 
-class LocalizationAccuracyIoU(BaseVisMetric):
+class LocalizationAccuracyIoU(BaseVisMetrics):
     @property
     def header_md(self) -> MarkdownWidget:
         title = "Localization Accuracy (IoU)"
@@ -90,7 +88,7 @@ class LocalizationAccuracyIoU(BaseVisMetric):
         bin_width = min([bin_edges[1] - bin_edges[0] for _, bin_edges in hist_data])
 
         for i, (eval_result, (hist, bin_edges)) in enumerate(zip(self.eval_results, hist_data)):
-            name = f"[{i+1}] {eval_result.…
+            name = f"[{i+1}] {eval_result.name}"
             kde = gaussian_kde(eval_result.mp.ious)
             density = kde(x_range)
 
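
The density curves in the last hunk come from a Gaussian kernel density estimate over each model's IoU distribution. A self-contained sketch with synthetic IoUs standing in for eval_result.mp.ious:

    import numpy as np
    from scipy.stats import gaussian_kde

    ious = np.random.uniform(0.5, 1.0, size=500)  # synthetic stand-in for eval_result.mp.ious
    x_range = np.linspace(0.5, 1.0, 200)          # shared x-axis for all models

    kde = gaussian_kde(ious)  # fit a smooth density to the raw IoU samples
    density = kde(x_range)    # evaluate it where the chart plots the curve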
supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py CHANGED

@@ -3,14 +3,12 @@ from typing import List
 
 import numpy as np
 
-from supervisely.nn.benchmark.…
-    BaseVisMetric,
-)
+from supervisely.nn.benchmark.base_visualizer import BaseVisMetrics
 from supervisely.nn.benchmark.visualization.widgets import ChartWidget
 from supervisely.nn.task_type import TaskType
 
 
-class OutcomeCounts(BaseVisMetric):
+class OutcomeCounts(BaseVisMetrics):
     CHART_MAIN = "chart_outcome_counts"
     CHART_COMPARISON = "chart_outcome_counts_comparison"
 
@@ -97,7 +95,7 @@ class OutcomeCounts(BaseVisMetric):
         tp_counts = [eval_result.mp.TP_count for eval_result in self.eval_results][::-1]
         fn_counts = [eval_result.mp.FN_count for eval_result in self.eval_results][::-1]
         fp_counts = [eval_result.mp.FP_count for eval_result in self.eval_results][::-1]
-        model_names = [f"[{i}] {e.…
+        model_names = [f"[{i}] {e.short_name}" for i, e in enumerate(self.eval_results, 1)][::-1]
         counts = [tp_counts, fn_counts, fp_counts]
         names = ["TP", "FN", "FP"]
         colors = ["#8ACAA1", "#dd3f3f", "#F7ADAA"]
@@ -123,7 +121,7 @@ class OutcomeCounts(BaseVisMetric):
         fig = go.Figure()
 
         colors = ["#8ACAA1", "#dd3f3f", "#F7ADAA"]
-        model_names = [f"[{i}] {e.…
+        model_names = [f"[{i}] {e.short_name}" for i, e in enumerate(self.eval_results, 1)][::-1]
         model_names.append("Common")
 
         diff_tps, common_tps = self.common_and_diff_tp
@@ -263,7 +261,7 @@ class OutcomeCounts(BaseVisMetric):
         res["layoutTemplate"] = [None, None, None]
         res["clickData"] = {}
         for i, eval_result in enumerate(self.eval_results, 1):
-            model_name = f"[{i}] {eval_result.…
+            model_name = f"[{i}] {eval_result.name}"
             for outcome, matches_data in eval_result.click_data.outcome_counts.items():
                 key = f"{model_name}_{outcome}"
                 outcome_dict = res["clickData"].setdefault(key, {})
@@ -278,7 +276,7 @@ class OutcomeCounts(BaseVisMetric):
                 title = f"{model_name}. {outcome}: {len(obj_ids)} object{'s' if len(obj_ids) > 1 else ''}"
                 outcome_dict["title"] = title
                 outcome_dict["imagesIds"] = list(img_ids)
-                thr = eval_result.f1_optimal_conf
+                thr = eval_result.mp.f1_optimal_conf
                 if outcome == "FN":
                     outcome_dict["filters"] = [
                         {"type": "specific_objects", "tagId": None, "value": list(obj_ids)},
@@ -327,7 +325,7 @@ class OutcomeCounts(BaseVisMetric):
             _update_outcome_dict("Common", outcome, outcome_dict, common_ids)
 
             for i, diff_ids in enumerate(diff_ids, 1):
-                name = f"[{i}] {self.eval_results[i - 1].…
+                name = f"[{i}] {self.eval_results[i - 1].name}"
                 key = f"{name}_{outcome}"
                 outcome_dict = res["clickData"].setdefault(key, {})
 
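
The charts these hunks touch are stacked TP/FN/FP bars, one row per model (plus a "Common" row in the comparison variant), using the palette above. A minimal sketch with placeholder models and counts; the horizontal orientation is assumed from the reversed model_names list:

    import plotly.graph_objects as go

    model_names = ["[2] model-b", "[1] model-a"]  # placeholder, reversed so model [1] renders on top
    counts = {"TP": [135, 120], "FN": [25, 30], "FP": [22, 18]}  # placeholder counts
    colors = {"TP": "#8ACAA1", "FN": "#dd3f3f", "FP": "#F7ADAA"}

    fig = go.Figure()
    for outcome, values in counts.items():
        fig.add_trace(
            go.Bar(
                y=model_names,
                x=values,
                name=outcome,
                orientation="h",
                marker=dict(color=colors[outcome]),
            )
        )
    fig.update_layout(barmode="stack")  # stack TP/FN/FP segments within each model's bar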
supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py CHANGED

@@ -1,9 +1,7 @@
 from typing import List
 
 from supervisely._utils import abs_url
-from supervisely.nn.benchmark.…
-    BaseVisMetric,
-)
+from supervisely.nn.benchmark.base_visualizer import BaseVisMetrics
 from supervisely.nn.benchmark.visualization.evaluation_result import EvalResult
 from supervisely.nn.benchmark.visualization.widgets import (
     ChartWidget,
@@ -12,7 +10,7 @@ from supervisely.nn.benchmark.visualization.widgets import (
 )
 
 
-class Overview(BaseVisMetric):
+class Overview(BaseVisMetrics):
 
     MARKDOWN_OVERVIEW = "markdown_overview"
     MARKDOWN_OVERVIEW_INFO = "markdown_overview_info"
@@ -62,7 +60,7 @@ class Overview(BaseVisMetric):
 
     @property
     def overview_widgets(self) -> List[MarkdownWidget]:
-        …
+        all_formats = []
        for eval_result in self.eval_results:
 
             url = eval_result.inference_info.get("checkpoint_url")
@@ -71,14 +69,10 @@ class Overview(BaseVisMetric):
             link_text = url
             link_text = link_text.replace("_", "\_")
 
-            checkpoint_name = eval_result.…
-                …
-            )
-            model_name = eval_result.inference_info.get("model_name") or "Custom"
+            checkpoint_name = eval_result.checkpoint_name
+            model_name = eval_result.name or "Custom"
 
-            report = eval_result.api.file.get_info_by_path(
-                eval_result.team_id, eval_result.report_path
-            )
+            report = eval_result.api.file.get_info_by_path(self.team_id, eval_result.report_path)
             report_link = abs_url(f"/model-benchmark?id={report.id}")
 
             formats = [
@@ -91,11 +85,11 @@ class Overview(BaseVisMetric):
                 link_text,
                 report_link,
             ]
-            …
+            all_formats.append(formats)
 
         text_template: str = getattr(self.vis_texts, self.MARKDOWN_OVERVIEW_INFO)
         widgets = []
-        for formats in …
+        for formats in all_formats:
             md = MarkdownWidget(
                 name=self.MARKDOWN_OVERVIEW_INFO,
                 title="Overview",
@@ -204,7 +198,7 @@ class Overview(BaseVisMetric):
         # Overall Metrics
         fig = go.Figure()
         for i, eval_result in enumerate(self.eval_results):
-            name = f"[{i + 1}] {eval_result.…
+            name = f"[{i + 1}] {eval_result.name}"
             base_metrics = eval_result.mp.base_metrics()
             r = list(base_metrics.values())
             theta = [eval_result.mp.metric_names[k] for k in base_metrics.keys()]
@@ -227,13 +221,8 @@ class Overview(BaseVisMetric):
                 angularaxis=dict(rotation=90, direction="clockwise"),
             ),
             dragmode=False,
-
-            # width=700,
-            # height=500,
-            # autosize=False,
+            height=500,
             margin=dict(l=25, r=25, t=25, b=25),
-        )
-        fig.update_layout(
             modebar=dict(
                 remove=[
                     "zoom2d",
@@ -245,6 +234,6 @@ class Overview(BaseVisMetric):
                     "autoScale2d",
                     "resetScale2d",
                 ]
-            )
+            ),
         )
         return fig
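
The overview chart is a radar plot of each model's base metrics; the hunks above mostly collapse two update_layout calls into one and replace the commented-out sizing with a fixed height. A sketch of the resulting layout, assuming a Scatterpolar trace and placeholder metric values:

    import plotly.graph_objects as go

    base_metrics = {"mAP": 0.52, "Precision": 0.61, "Recall": 0.57}  # placeholders

    fig = go.Figure()
    fig.add_trace(
        go.Scatterpolar(
            r=list(base_metrics.values()),
            theta=list(base_metrics.keys()),
            name="[1] model-a",  # placeholder label
        )
    )
    fig.update_layout(  # a single call, as after this change
        polar=dict(angularaxis=dict(rotation=90, direction="clockwise")),
        dragmode=False,
        height=500,
        margin=dict(l=25, r=25, t=25, b=25),
        modebar=dict(remove=["zoom2d", "autoScale2d", "resetScale2d"]),
    )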
supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py CHANGED

@@ -1,9 +1,7 @@
 import numpy as np
 
 from supervisely.imaging.color import hex2rgb
-from supervisely.nn.benchmark.…
-    BaseVisMetric,
-)
+from supervisely.nn.benchmark.base_visualizer import BaseVisMetrics
 from supervisely.nn.benchmark.visualization.widgets import (
     ChartWidget,
     CollapseWidget,
@@ -13,7 +11,7 @@ from supervisely.nn.benchmark.visualization.widgets import (
 )
 
 
-class PrCurve(BaseVisMetric):
+class PrCurve(BaseVisMetrics):
     MARKDOWN_PR_CURVE = "markdown_pr_curve"
     MARKDOWN_PR_TRADE_OFFS = "markdown_trade_offs"
     MARKDOWN_WHAT_IS_PR_CURVE = "markdown_what_is_pr_curve"
@@ -98,7 +96,7 @@ class PrCurve(BaseVisMetric):
         pr_curve[pr_curve == -1] = np.nan
         pr_curve = np.nanmean(pr_curve, axis=-1)
 
-        name = f"[{i}] {eval_result.…
+        name = f"[{i}] {eval_result.name}"
         color = ",".join(map(str, hex2rgb(eval_result.color))) + ",0.1"
         line = go.Scatter(
             x=eval_result.mp.recThrs,
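
The -1 sentinel handling above follows the pycocotools convention, where precision entries are -1 wherever no detections exist for a recall threshold; converting them to NaN lets nanmean average only the valid classes. A worked example:

    import numpy as np

    pr_curve = np.array([[0.9, -1.0], [0.8, 0.7]])  # (recall thresholds x classes); -1 = no data
    pr_curve[pr_curve == -1] = np.nan
    print(np.nanmean(pr_curve, axis=-1))  # [0.9  0.75]; the -1 entry is ignored, not averaged as 0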
supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py CHANGED

@@ -1,6 +1,4 @@
-from supervisely.nn.benchmark.…
-    BaseVisMetric,
-)
+from supervisely.nn.benchmark.base_visualizer import BaseVisMetrics
 from supervisely.nn.benchmark.visualization.widgets import (
     ChartWidget,
     CollapseWidget,
@@ -10,7 +8,7 @@ from supervisely.nn.benchmark.visualization.widgets import (
 )
 
 
-class PrecisionRecallF1(BaseVisMetric):
+class PrecisionRecallF1(BaseVisMetrics):
     MARKDOWN = "markdown_PRF1"
     MARKDOWN_PRECISION_TITLE = "markdown_precision_per_class_title"
     MARKDOWN_RECALL_TITLE = "markdown_recall_per_class_title"
@@ -136,14 +134,14 @@ class PrecisionRecallF1(BaseVisMetric):
         precision = eval_result.mp.json_metrics()["precision"]
         recall = eval_result.mp.json_metrics()["recall"]
         f1 = eval_result.mp.json_metrics()["f1"]
-        model_name = f"[{i}] {eval_result.…
+        model_name = f"[{i}] {eval_result.name}"
         fig.add_trace(
             go.Bar(
                 x=["Precision", "Recall", "F1-score"],
                 y=[precision, recall, f1],
                 name=model_name,
                 width=0.2 if classes_cnt >= 5 else None,
-                marker=dict(color=eval_result.color),
+                marker=dict(color=eval_result.color, line=dict(width=0.7)),
             )
         )
 
@@ -152,7 +150,7 @@ class PrecisionRecallF1(BaseVisMetric):
             xaxis_title="Metric",
             yaxis_title="Value",
             yaxis=dict(range=[0, 1.1]),
-            width=700
+            width=700,
         )
 
         return fig
@@ -163,7 +161,7 @@ class PrecisionRecallF1(BaseVisMetric):
         fig = go.Figure()
         classes_cnt = len(self.eval_results[0].mp.cat_names)
         for i, eval_result in enumerate(self.eval_results, 1):
-            model_name = f"[{i}] {eval_result.…
+            model_name = f"[{i}] {eval_result.name}"
             sorted_by_f1 = eval_result.mp.per_class_metrics().sort_values(by="f1")
 
             fig.add_trace(
@@ -171,8 +169,8 @@ class PrecisionRecallF1(BaseVisMetric):
                     y=sorted_by_f1["recall"],
                     x=sorted_by_f1["category"],
                     name=f"{model_name} Recall",
-                    width=0.2 if classes_cnt…
-                    marker=dict(color=eval_result.color),
+                    width=0.2 if classes_cnt >= 5 else None,
+                    marker=dict(color=eval_result.color, line=dict(width=0.7)),
                 )
             )
 
@@ -191,7 +189,7 @@ class PrecisionRecallF1(BaseVisMetric):
         res["layoutTemplate"] = [None, None, None]
         res["clickData"] = {}
         for i, eval_result in enumerate(self.eval_results):
-            model_name = f"Model [{i + 1}] {eval_result.…
+            model_name = f"Model [{i + 1}] {eval_result.name}"
             for key, v in eval_result.click_data.objects_by_class.items():
                 click_data = res["clickData"].setdefault(f"{i}_{key}", {})
                 img_ids, obj_ids = set(), set()
@@ -207,7 +205,7 @@ class PrecisionRecallF1(BaseVisMetric):
                     {
                         "type": "tag",
                         "tagId": "confidence",
-                        "value": [eval_result.f1_optimal_conf, 1],
+                        "value": [eval_result.mp.f1_optimal_conf, 1],
                     },
                     {"type": "tag", "tagId": "outcome", "value": "TP"},
                     {"type": "specific_objects", "tagId": None, "value": list(obj_ids)},
@@ -220,7 +218,7 @@ class PrecisionRecallF1(BaseVisMetric):
         fig = go.Figure()
         classes_cnt = len(self.eval_results[0].mp.cat_names)
         for i, eval_result in enumerate(self.eval_results, 1):
-            model_name = f"[{i}] {eval_result.…
+            model_name = f"[{i}] {eval_result.name}"
             sorted_by_f1 = eval_result.mp.per_class_metrics().sort_values(by="f1")
 
             fig.add_trace(
@@ -228,8 +226,8 @@ class PrecisionRecallF1(BaseVisMetric):
                     y=sorted_by_f1["precision"],
                     x=sorted_by_f1["category"],
                     name=f"{model_name} Precision",
-                    width=0.2 if classes_cnt…
-                    marker=dict(color=eval_result.color),
+                    width=0.2 if classes_cnt >= 5 else None,
+                    marker=dict(color=eval_result.color, line=dict(width=0.7)),
                 )
             )
 
@@ -249,7 +247,7 @@ class PrecisionRecallF1(BaseVisMetric):
         fig = go.Figure()
         classes_cnt = len(self.eval_results[0].mp.cat_names)
         for i, eval_result in enumerate(self.eval_results, 1):
-            model_name = f"[{i}] {eval_result.…
+            model_name = f"[{i}] {eval_result.name}"
             sorted_by_f1 = eval_result.mp.per_class_metrics().sort_values(by="f1")
 
             fig.add_trace(
@@ -257,8 +255,8 @@ class PrecisionRecallF1(BaseVisMetric):
                     y=sorted_by_f1["f1"],
                     x=sorted_by_f1["category"],
                     name=f"{model_name} F1-score",
-                    width=0.2 if classes_cnt…
-                    marker=dict(color=eval_result.color),
+                    width=0.2 if classes_cnt >= 5 else None,
+                    marker=dict(color=eval_result.color, line=dict(width=0.7)),
                 )
             )
 
@@ -278,7 +276,7 @@ class PrecisionRecallF1(BaseVisMetric):
         res["clickData"] = {}
 
         for i, eval_result in enumerate(self.eval_results):
-            model_name = f"Model [{i + 1}] {eval_result.…
+            model_name = f"Model [{i + 1}] {eval_result.name}"
             click_data = res["clickData"].setdefault(i, {})
             img_ids, obj_ids = set(), set()
             objects_cnt = 0
@@ -292,7 +290,11 @@ class PrecisionRecallF1(BaseVisMetric):
             click_data["title"] = f"{model_name}, {objects_cnt} objects"
             click_data["imagesIds"] = list(img_ids)
             click_data["filters"] = [
-                {…
+                {
+                    "type": "tag",
+                    "tagId": "confidence",
+                    "value": [eval_result.mp.f1_optimal_conf, 1],
+                },
                 {"type": "tag", "tagId": "outcome", "value": "TP"},
                 {"type": "specific_objects", "tagId": None, "value": list(obj_ids)},
             ]
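
All three per-class charts in this file share one trace recipe, which this release adjusts twice: the width guard (shown truncated in the old lines) becomes width=0.2 if classes_cnt >= 5 else None, and bars gain a thin 0.7-width outline. A sketch with placeholder classes and scores:

    import plotly.graph_objects as go

    categories = ["cat", "dog", "person"]  # placeholder class names
    per_model_f1 = {  # placeholder per-class F1 scores
        "[1] model-a": [0.81, 0.77, 0.90],
        "[2] model-b": [0.85, 0.74, 0.88],
    }
    classes_cnt = len(categories)

    fig = go.Figure()
    for model_name, scores in per_model_f1.items():
        fig.add_trace(
            go.Bar(
                x=categories,
                y=scores,
                name=f"{model_name} F1-score",
                # fixed narrow bars only when there are many classes; otherwise auto-size
                width=0.2 if classes_cnt >= 5 else None,
                marker=dict(line=dict(width=0.7)),  # the thin outline added in this release
            )
        )
    fig.update_layout(yaxis=dict(range=[0, 1.1]), width=700)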
supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py CHANGED

@@ -1,9 +1,7 @@
 from typing import List, Union
 
 from supervisely.imaging.color import hex2rgb
-from supervisely.nn.benchmark.…
-    BaseVisMetric,
-)
+from supervisely.nn.benchmark.base_visualizer import BaseVisMetrics
 from supervisely.nn.benchmark.visualization.widgets import (
     ChartWidget,
     MarkdownWidget,
@@ -11,11 +9,19 @@ from supervisely.nn.benchmark.visualization.widgets import (
 )
 
 
-class Speedtest(BaseVisMetric):
+class Speedtest(BaseVisMetrics):
 
     def is_empty(self) -> bool:
         return not any(eval_result.speedtest_info for eval_result in self.eval_results)
 
+    def multiple_batche_sizes(self) -> bool:
+        for eval_result in self.eval_results:
+            if eval_result.speedtest_info is None:
+                continue
+            if len(eval_result.speedtest_info["speedtest"]) > 1:
+                return True
+        return False
+
     @property
     def latency(self) -> List[Union[int, str]]:
         latency = []
@@ -272,7 +278,7 @@ class Speedtest(BaseVisMetric):
                 go.Scatter(
                     x=list(temp_res["ms"].keys()),
                     y=list(temp_res["ms"].values()),
-                    name=f"[{idx}] {eval_result.…
+                    name=f"[{idx}] {eval_result.name} (ms)",
                     line=dict(color=eval_result.color),
                     customdata=list(temp_res["ms_std"].values()),
                     error_y=dict(
@@ -290,7 +296,7 @@ class Speedtest(BaseVisMetric):
                 go.Scatter(
                     x=list(temp_res["fps"].keys()),
                     y=list(temp_res["fps"].values()),
-                    name=f"[{idx}] {eval_result.…
+                    name=f"[{idx}] {eval_result.name} (fps)",
                     line=dict(color=eval_result.color),
                     hovertemplate="Batch Size: %{x}<br>FPS: %{y:.2f}<extra></extra>",  # <br> Standard deviation: %{customdata:.2f}<extra></extra>",
                 ),