supervisely 6.73.254__py3-none-any.whl → 6.73.255__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of supervisely might be problematic.
- supervisely/api/file_api.py +16 -5
- supervisely/api/task_api.py +4 -2
- supervisely/app/widgets/field/field.py +10 -7
- supervisely/app/widgets/grid_gallery_v2/grid_gallery_v2.py +3 -1
- supervisely/nn/benchmark/base_benchmark.py +33 -35
- supervisely/nn/benchmark/base_evaluator.py +27 -1
- supervisely/nn/benchmark/base_visualizer.py +8 -11
- supervisely/nn/benchmark/comparison/base_visualizer.py +147 -0
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/__init__.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py +5 -7
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +4 -6
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/{explore_predicttions.py → explore_predictions.py} +17 -17
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +3 -5
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +7 -9
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +11 -22
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +3 -5
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +22 -20
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +12 -6
- supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py +31 -76
- supervisely/nn/benchmark/comparison/model_comparison.py +112 -19
- supervisely/nn/benchmark/comparison/semantic_segmentation/__init__.py +0 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/text_templates.py +128 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/__init__.py +21 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/classwise_error_analysis.py +68 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/explore_predictions.py +141 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/frequently_confused.py +71 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/iou_eou.py +68 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/overview.py +223 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/renormalized_error_ou.py +57 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/speedtest.py +314 -0
- supervisely/nn/benchmark/comparison/semantic_segmentation/visualizer.py +159 -0
- supervisely/nn/benchmark/instance_segmentation/evaluator.py +1 -1
- supervisely/nn/benchmark/object_detection/evaluator.py +1 -1
- supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +1 -3
- supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +3 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +3 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +1 -1
- supervisely/nn/benchmark/object_detection/visualizer.py +5 -10
- supervisely/nn/benchmark/semantic_segmentation/evaluator.py +12 -2
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +8 -9
- supervisely/nn/benchmark/semantic_segmentation/text_templates.py +2 -2
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +31 -1
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +1 -3
- supervisely/nn/benchmark/semantic_segmentation/visualizer.py +7 -6
- supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +3 -21
- supervisely/nn/benchmark/visualization/renderer.py +25 -10
- supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +1 -0
- supervisely/nn/inference/inference.py +1 -0
- supervisely/nn/training/gui/gui.py +32 -10
- supervisely/nn/training/gui/training_artifacts.py +145 -0
- supervisely/nn/training/gui/training_process.py +3 -19
- supervisely/nn/training/train_app.py +179 -70
- {supervisely-6.73.254.dist-info → supervisely-6.73.255.dist-info}/METADATA +1 -1
- {supervisely-6.73.254.dist-info → supervisely-6.73.255.dist-info}/RECORD +58 -46
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/vis_metric.py +0 -19
- {supervisely-6.73.254.dist-info → supervisely-6.73.255.dist-info}/LICENSE +0 -0
- {supervisely-6.73.254.dist-info → supervisely-6.73.255.dist-info}/WHEEL +0 -0
- {supervisely-6.73.254.dist-info → supervisely-6.73.255.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.254.dist-info → supervisely-6.73.255.dist-info}/top_level.txt +0 -0
**supervisely/nn/benchmark/comparison/semantic_segmentation/visualizer.py** (new file, +159 lines)

```python
from typing import List

import supervisely.nn.benchmark.comparison.semantic_segmentation.text_templates as texts
from supervisely.nn.benchmark.comparison.base_visualizer import BaseComparisonVisualizer
from supervisely.nn.benchmark.comparison.semantic_segmentation.vis_metrics import (
    ClasswiseErrorAnalysis,
    ExplorePredictions,
    FrequentlyConfused,
    IntersectionErrorOverUnion,
    Overview,
    RenormalizedErrorOverUnion,
    Speedtest,
)
from supervisely.nn.benchmark.semantic_segmentation.evaluator import (
    SemanticSegmentationEvalResult,
)
from supervisely.nn.benchmark.visualization.widgets import (
    ContainerWidget,
    MarkdownWidget,
    SidebarWidget,
)


class SemanticSegmentationComparisonVisualizer(BaseComparisonVisualizer):
    vis_texts = texts
    ann_opacity = 0.7

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_results: List[SemanticSegmentationEvalResult]

    def _create_widgets(self):
        # Modal gallery
        self.diff_modal = self._create_diff_modal_table()
        self.explore_modal = self._create_explore_modal_table(
            click_gallery_id=self.diff_modal.id, hover_text="Compare with GT"
        )

        # Notification
        self.clickable_label = self._create_clickable_label()

        # Speedtest init here for overview
        speedtest = Speedtest(self.vis_texts, self.comparison.eval_results)

        # Overview
        overview = Overview(self.vis_texts, self.comparison.eval_results)
        overview.team_id = self.comparison.team_id
        self.header = self._create_header()
        self.overviews = self._create_overviews(overview, grid_cols=2)
        self.overview_md = overview.overview_md
        self.key_metrics_md = self._create_key_metrics()
        self.key_metrics_table = overview.get_table_widget(
            latency=speedtest.latency, fps=speedtest.fps
        )
        self.overview_chart = overview.chart_widget

        # Explore Predictions
        columns_number = len(self.comparison.eval_results) + 1  # +1 for GT
        self.explore_predictions_modal_gallery = self._create_explore_modal_table(columns_number)
        explore_predictions = ExplorePredictions(
            self.vis_texts,
            self.comparison.eval_results,
            explore_modal_table=self.explore_predictions_modal_gallery,
        )
        self.explore_predictions_md = explore_predictions.difference_predictions_md
        self.explore_predictions_gallery = explore_predictions.explore_gallery

        # IntersectionErrorOverUnion
        iou_eou = IntersectionErrorOverUnion(self.vis_texts, self.comparison.eval_results)
        self.iou_eou_md = iou_eou.md
        self.iou_eou_chart = iou_eou.chart

        # RenormalizedErrorOverUnion
        reou = RenormalizedErrorOverUnion(self.vis_texts, self.comparison.eval_results)
        self.reou_md = reou.md
        self.reou_chart = reou.chart

        # ClasswiseErrorAnalysis
        classwise_ea = ClasswiseErrorAnalysis(self.vis_texts, self.comparison.eval_results)
        self.classwise_ea_md = classwise_ea.md
        self.classwise_ea_chart = classwise_ea.chart

        # FrequentlyConfused
        frequently_confused = FrequentlyConfused(self.vis_texts, self.comparison.eval_results)
        self.frequently_confused_md = frequently_confused.md
        self.frequently_confused_chart = frequently_confused.chart

        # SpeedTest
        self.speedtest_present = not speedtest.is_empty()
        self.speedtest_multiple_batch_sizes = False

        if self.speedtest_present:
            self.speedtest_md_intro = speedtest.md_intro
            self.speedtest_intro_table = speedtest.intro_table
            self.speed_inference_time_md = speedtest.inference_time_md
            self.speed_inference_time_table = speedtest.inference_time_table
            self.speed_fps_md = speedtest.fps_md
            self.speed_fps_table = speedtest.fps_table
            self.speedtest_multiple_batch_sizes = speedtest.multiple_batche_sizes()
            if self.speedtest_multiple_batch_sizes:
                self.speed_batch_inference_md = speedtest.batch_inference_md
                self.speed_chart = speedtest.chart

    def _create_layout(self):
        is_anchors_widgets = [
            # Overview
            (0, self.header),
            (1, self.overview_md),
            (0, self.overviews),
            (1, self.key_metrics_md),
            (0, self.key_metrics_table),
            (0, self.overview_chart),
            # Explore Predictions
            (1, self.explore_predictions_md),
            (0, self.explore_predictions_gallery),
            # IntersectionErrorOverUnion
            (1, self.iou_eou_md),
            (0, self.iou_eou_chart),
            # RenormalizedErrorOverUnion
            (1, self.reou_md),
            (0, self.reou_chart),
            # ClasswiseErrorAnalysis
            (1, self.classwise_ea_md),
            (0, self.classwise_ea_chart),
            # FrequentlyConfused
            (1, self.frequently_confused_md),
            (0, self.frequently_confused_chart),
        ]
        if self.speedtest_present:
            is_anchors_widgets.extend(
                [
                    # SpeedTest
                    (1, self.speedtest_md_intro),
                    (0, self.speedtest_intro_table),
                    (0, self.speed_inference_time_md),
                    (0, self.speed_inference_time_table),
                    (0, self.speed_fps_md),
                    (0, self.speed_fps_table),
                ]
            )
            if self.speedtest_multiple_batch_sizes:
                is_anchors_widgets.append((0, self.speed_batch_inference_md))
                is_anchors_widgets.append((0, self.speed_chart))
        anchors = []
        for is_anchor, widget in is_anchors_widgets:
            if is_anchor:
                anchors.append(widget.id)

        sidebar = SidebarWidget(widgets=[i[1] for i in is_anchors_widgets], anchors=anchors)
        layout = ContainerWidget(
            widgets=[sidebar, self.explore_modal, self.explore_predictions_modal_gallery],
            name="main_container",
        )
        return layout

    def _create_key_metrics(self) -> MarkdownWidget:
        return MarkdownWidget(
            "markdown_key_metrics", "Key Metrics", text=self.vis_texts.markdown_key_metrics
        )
```
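The `_create_layout` method above tags every widget with a 0/1 flag; all widgets render in order, but only flagged ones become sidebar navigation anchors. A minimal runnable sketch of that convention (the `Widget` dataclass is a hypothetical stand-in, not the real supervisely widget base):

```python
from dataclasses import dataclass
from typing import List, Tuple


@dataclass
class Widget:  # hypothetical stand-in for a supervisely widget
    id: str


def split_layout(pairs: List[Tuple[int, Widget]]):
    # All widgets are rendered in order; only flagged ones become anchors.
    widgets = [w for _, w in pairs]
    anchors = [w.id for is_anchor, w in pairs if is_anchor]
    return widgets, anchors


widgets, anchors = split_layout([(0, Widget("header")), (1, Widget("overview_md"))])
print(anchors)  # ['overview_md'] -- the header renders but gets no sidebar entry
```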
**supervisely/nn/benchmark/instance_segmentation/evaluator.py**

```diff
@@ -39,7 +39,7 @@ class InstanceSegmentationEvaluator(ObjectDetectionEvaluator):
         except AssertionError as e:
             raise ValueError(
                 f"{e}. Please make sure that your GT and DT projects are correct. "
-                "If GT project has nested datasets and DT project was
+                "If GT project has nested datasets and DT project was created with NN app, "
                 "try to use newer version of NN app."
             )

```
**supervisely/nn/benchmark/object_detection/evaluator.py**

```diff
@@ -104,7 +104,7 @@ class ObjectDetectionEvaluator(BaseEvaluator):
         except AssertionError as e:
             raise ValueError(
                 f"{e}. Please make sure that your GT and DT projects are correct. "
-                "If GT project has nested datasets and DT project was
+                "If GT project has nested datasets and DT project was created with NN app, "
                 "try to use newer version of NN app."
             )
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
```
**supervisely/nn/benchmark/object_detection/vis_metrics/overview.py**

```diff
@@ -24,9 +24,7 @@ class Overview(DetectionVisMetric):
         link_text = link_text.replace("_", "\_")

         model_name = self.eval_result.inference_info.get("model_name") or "Custom"
-        checkpoint_name = self.eval_result.
-            "checkpoint_name", ""
-        )
+        checkpoint_name = self.eval_result.checkpoint_name

         # Note about validation dataset
         classes_str, note_about_images, starter_app_info = self._get_overview_info()
```
**supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py**

```diff
@@ -50,7 +50,7 @@ class RecallVsPrecision(DetectionVisMetric):
                 marker=dict(color=orange_color),
             )
         )
-        fig.update_layout(barmode="group")
+        fig.update_layout(barmode="group", width=800 if len(sorted_by_f1) < 10 else None)
         fig.update_xaxes(title_text="Class")
         fig.update_yaxes(title_text="Value", range=[0, 1])
         return fig
```
**supervisely/nn/benchmark/object_detection/visualizer.py**

```diff
@@ -56,21 +56,17 @@ class ObjectDetectionVisualizer(BaseVisualizer):
         self._widgets = False
         self.ann_opacity = 0.4

-        diff_project_info, diff_dataset_infos,
+        diff_project_info, diff_dataset_infos, _ = self._get_or_create_diff_project()
         self.eval_result.diff_project_info = diff_project_info
         self.eval_result.diff_dataset_infos = diff_dataset_infos
         self.eval_result.matched_pair_data = {}

         self.gt_project_path = str(Path(self.workdir).parent / "gt_project")
         self.pred_project_path = str(Path(self.workdir).parent / "pred_project")
-
-            self.update_diff_annotations()
-        else:
-            self._init_match_data()
+        self.update_diff_annotations()

         # set filtered project meta
         self.eval_result.filtered_project_meta = self._get_filtered_project_meta(self.eval_result)
-
         self._get_sample_data_for_gallery()

     @property
```
**supervisely/nn/benchmark/object_detection/visualizer.py**

```diff
@@ -222,10 +218,9 @@ class ObjectDetectionVisualizer(BaseVisualizer):

         # Speedtest init here for overview
         speedtest = Speedtest(self.vis_texts, self.eval_result)
-        self.speedtest_present =
+        self.speedtest_present = not speedtest.is_empty()
         self.speedtest_batch_sizes_cnt = speedtest.num_batche_sizes
-        if
-            self.speedtest_present = True
+        if self.speedtest_present:
             self.speedtest_md_intro = speedtest.intro_md
             self.speedtest_table_md = speedtest.table_md
             self.speedtest_table = speedtest.table
```
**supervisely/nn/benchmark/object_detection/visualizer.py**

```diff
@@ -393,7 +388,7 @@ class ObjectDetectionVisualizer(BaseVisualizer):

         pred_tag_list = []
         with self.pbar(
-            message="Visualizations: Creating
+            message="Visualizations: Creating difference project", total=pred_project.total_items
         ) as progress:
             logger.debug(
                 "Creating diff project data",
```
**supervisely/nn/benchmark/semantic_segmentation/evaluator.py**

```diff
@@ -97,7 +97,13 @@ class SemanticSegmentationEvaluator(BaseEvaluator):
         meta_path = Path(project_path) / "meta.json"
         meta = ProjectMeta.from_json(load_json_file(meta_path))

-        palette = [
+        palette = []
+        for cls_name in self.classes_whitelist:
+            obj_cls = meta.get_obj_class(cls_name)
+            if obj_cls is None:
+                palette.append((0, 0, 0))
+            else:
+                palette.append(obj_cls.color)

         return palette

```
**supervisely/nn/benchmark/semantic_segmentation/evaluator.py**

```diff
@@ -123,7 +129,11 @@ class SemanticSegmentationEvaluator(BaseEvaluator):
                 continue

             palette = self._get_palette(src_dir)
-
+            bg_cls_idx = self.classes_whitelist.index(self.bg_cls_name)
+            try:
+                bg_color = palette[bg_cls_idx]
+            except IndexError:
+                bg_color = (0, 0, 0)
             output_dir.mkdir(parents=True)
             temp_seg_dir = src_dir + "_temp"
             if not os.path.exists(temp_seg_dir):
```
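Both hunks above make the evaluator tolerant of classes missing from the project meta: `_get_palette` now substitutes black for unknown classes, and the background-color lookup falls back to black on an out-of-range index. A standalone sketch of that fallback, with `class_colors` standing in for the `meta.get_obj_class(...)` lookup:

```python
# class_colors mimics meta.get_obj_class(name).color; "bicycle" is missing.
class_colors = {"person": (255, 0, 0), "car": (0, 0, 255)}
classes_whitelist = ["person", "bicycle", "car"]

palette = []
for cls_name in classes_whitelist:
    color = class_colors.get(cls_name)
    palette.append(color if color is not None else (0, 0, 0))  # black fallback

print(palette)  # [(255, 0, 0), (0, 0, 0), (0, 0, 255)]
```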
**supervisely/nn/benchmark/semantic_segmentation/metric_provider.py**

```diff
@@ -1,4 +1,4 @@
-from typing import Any, Dict
+from typing import Any, Dict, Optional

 import numpy as np

```
**supervisely/nn/benchmark/semantic_segmentation/metric_provider.py**

```diff
@@ -65,7 +65,7 @@ class MetricProvider:

         # frequently confused classes
         self.frequently_confused = self.get_frequently_confused(
-            eval_data["confusion_matrix"].copy()
+            eval_data["confusion_matrix"].copy(), n_pairs=20
         )

     def json_metrics(self):
```
**supervisely/nn/benchmark/semantic_segmentation/metric_provider.py**

```diff
@@ -114,18 +114,17 @@ class MetricProvider:
         confusion_matrix = confusion_matrix[::-1]
         return confusion_matrix, class_names

-    def get_frequently_confused(self, confusion_matrix: np.ndarray):
-        n_pairs = 20
+    def get_frequently_confused(self, confusion_matrix: np.ndarray, n_pairs: Optional[int] = None):

-
+        non_diagonal_ids = {}
         for i, idx in enumerate(np.ndindex(confusion_matrix.shape)):
             if idx[0] != idx[1]:
-
+                non_diagonal_ids[i] = idx

         indexes_1d = np.argsort(confusion_matrix, axis=None)
-        indexes_2d = [
-
-
+        indexes_2d = [non_diagonal_ids[idx] for idx in indexes_1d if idx in non_diagonal_ids]
+        if n_pairs is not None:
+            indexes_2d = indexes_2d[:n_pairs]
         indexes_2d = np.asarray(indexes_2d[::-1])

         rows = indexes_2d[:, 0]
```
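The refactored `get_frequently_confused` makes the pair limit a parameter instead of a hard-coded 20. The idea: rank off-diagonal confusion-matrix cells by value and keep the top pairs. A self-contained illustration of that ranking (toy matrix; not the exact package code, whose slicing details are partly truncated in this diff):

```python
import numpy as np

cm = np.array(
    [
        [50, 3, 1],
        [7, 40, 2],
        [0, 5, 45],
    ]
)

# Map flat indices to (row, col) for off-diagonal cells only.
non_diagonal = {i: idx for i, idx in enumerate(np.ndindex(cm.shape)) if idx[0] != idx[1]}
order = np.argsort(cm, axis=None)  # flat indices, ascending by count
pairs = [non_diagonal[i] for i in order if i in non_diagonal]

n_pairs = 2
top = pairs[-n_pairs:][::-1]  # most confused first
print(top)  # [(1, 0), (2, 1)] -> 7 of class 1 predicted as 0, 5 of class 2 as 1
```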
**supervisely/nn/benchmark/semantic_segmentation/text_templates.py**

```diff
@@ -40,7 +40,7 @@ We provide a comprehensive model performance analysis using a set of metrics, in

 - **Pixel accuracy**: reflects the percent of image pixels which were correctly classified.
 - **Precision**: reflects the number of correctly predicted positive segmentations divided by the total number of predicted positive segmentations.
-- **
+- **Recall**: reflects the number of correctly predicted positive segmentations divided by the number of all samples that should have been segmented as positive.
 - **F1-score**: reflects the tradeoff between precision and recall. It is equivalent to the Dice coefficient and calculated as a harmonic mean of precision and recall.
 - **Intersection over union (IoU, also known as the Jaccard index)**: measures the overlap between ground truth mask and predicted mask. It is calculated as the ratio of the intersection of the two masks areas to their combined areas.
 - **Boundary intersection over union**: a segmentation consistency measure that first computes the sets of ground truth and predicted masks pixels that are located within the distance d from each contour and then computes intersection over union of these two sets. Pixel distance parameter d (pixel width of the boundary region) controls the sensitivity of the metric, it is usually set as 2% of the image diagonal for normal resolution images and 0.5% of the image diagonal for high resolution images.
```
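The restored **Recall** bullet completes the definitions above. As a quick sanity check, all five mask metrics reduce to a few lines of NumPy for a single binary mask (toy values, not package code):

```python
import numpy as np

gt = np.array([[1, 1, 0], [0, 1, 0]], dtype=bool)
pred = np.array([[1, 0, 0], [0, 1, 1]], dtype=bool)

tp = (gt & pred).sum()   # 2 correctly predicted positive pixels
fp = (~gt & pred).sum()  # 1 false positive
fn = (gt & ~pred).sum()  # 1 missed positive

pixel_accuracy = (gt == pred).mean()                # 4/6 ~ 0.667
precision = tp / (tp + fp)                          # 2/3
recall = tp / (tp + fn)                             # 2/3
f1 = 2 * precision * recall / (precision + recall)  # 2/3 (equals Dice)
iou = tp / (tp + fp + fn)                           # 0.5
print(pixel_accuracy, precision, recall, f1, iou)
```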
**supervisely/nn/benchmark/semantic_segmentation/text_templates.py**

```diff
@@ -72,7 +72,7 @@ The pie chart below demonstrates what the model lacked in order to show the perf

 markdown_renormalized_error_ou = """## Renormalized Error Over Union

-The
+The chart below is dedicated to decomposition of postprocessed variant of error over union which takes into consideration cause and effect relationships between different types of segmentation errors. Error over union decomposition has its own pitfalls. It is important to understand that models which tend to produce segment errors (when entire segments are mispredicted and there is no intersection between ground truth and predicted mask) will face less occasions to produce boundary and extent errors - as a result, boundary and extent error over union values will be underestimated.

 In terms of localization, segment error is more fundamental than extent, while extent error is more fundamental than boundary. In order to overcome this problem, renormalized error over union proposes a slightly different calculation method - by removing more fundamental errors from the denominator - read more in our <a href="{}" target="_blank">technical report</a>
 """.format(
```
**supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py**

```diff
@@ -1,7 +1,11 @@
 from supervisely.nn.benchmark.semantic_segmentation.base_vis_metric import (
     SemanticSegmVisMetric,
 )
-from supervisely.nn.benchmark.visualization.widgets import
+from supervisely.nn.benchmark.visualization.widgets import (
+    ChartWidget,
+    MarkdownWidget,
+    TableWidget,
+)


 class KeyMetrics(SemanticSegmVisMetric):
```
**supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py**

```diff
@@ -14,6 +18,32 @@ class KeyMetrics(SemanticSegmVisMetric):
             text=self.vis_texts.markdown_key_metrics,
         )

+    @property
+    def table(self) -> TableWidget:
+        columns = ["metrics", "values"]
+        content = []
+
+        metrics = self.eval_result.mp.key_metrics().copy()
+        metrics["mPixel accuracy"] = round(metrics["mPixel accuracy"] * 100, 2)
+
+        for metric, value in metrics.items():
+            row = [metric, round(value, 2)]
+            dct = {"row": row, "id": metric, "items": row}
+            content.append(dct)
+
+        columns_options = [{"disableSort": True}, {"disableSort": True}]
+        data = {"columns": columns, "columnsOptions": columns_options, "content": content}
+
+        table = TableWidget(
+            name="table_key_metrics",
+            data=data,
+            fix_columns=1,
+            width="60%",
+            show_header_controls=False,
+            main_column=columns[0],
+        )
+        return table
+
     @property
     def chart(self) -> ChartWidget:
         return ChartWidget("base_metrics_chart", self.get_figure())
```
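The new `table` property assembles a plain-dict payload for `TableWidget`. The shape of that payload, shown with made-up metric values (only the dict structure is taken from the diff):

```python
metrics = {"mIoU": 0.8123, "mPixel accuracy": 91.27}  # illustrative values

columns = ["metrics", "values"]
content = []
for metric, value in metrics.items():
    row = [metric, round(value, 2)]
    content.append({"row": row, "id": metric, "items": row})

data = {
    "columns": columns,
    "columnsOptions": [{"disableSort": True}, {"disableSort": True}],
    "content": content,
}
print(data["content"][0])  # {'row': ['mIoU', 0.81], 'id': 'mIoU', 'items': ['mIoU', 0.81]}
```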
**supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py**

```diff
@@ -26,9 +26,7 @@ class Overview(SemanticSegmVisMetric):
         link_text = link_text.replace("_", "\_")

         model_name = self.eval_result.inference_info.get("model_name") or "Custom"
-        checkpoint_name = self.eval_result.
-            "checkpoint_name", ""
-        )
+        checkpoint_name = self.eval_result.checkpoint_name

         # Note about validation dataset
         classes_str, note_about_images, starter_app_info = self._get_overview_info()
```
**supervisely/nn/benchmark/semantic_segmentation/visualizer.py**

```diff
@@ -93,6 +93,7 @@ class SemanticSegmentationVisualizer(BaseVisualizer):
         # key metrics
         key_metrics = KeyMetrics(self.vis_texts, self.eval_result)
         self.key_metrics_md = key_metrics.md
+        self.key_metrics_table = key_metrics.table
         self.key_metrics_chart = key_metrics.chart

         # explore predictions
```
**supervisely/nn/benchmark/semantic_segmentation/visualizer.py**

```diff
@@ -143,15 +144,14 @@ class SemanticSegmentationVisualizer(BaseVisualizer):
         self.acknowledgement_md = acknowledgement.md

         # SpeedTest
-        self.speedtest_present = False
-        self.speedtest_multiple_batch_sizes = False
         speedtest = Speedtest(self.vis_texts, self.eval_result)
-
-
+        self.speedtest_present = not speedtest.is_empty()
+        self.speedtest_multiple_batch_sizes = False
+        if self.speedtest_present:
             self.speedtest_md_intro = speedtest.intro_md
             self.speedtest_intro_table = speedtest.intro_table
-
-
+            self.speedtest_multiple_batch_sizes = speedtest.multiple_batche_sizes()
+            if self.speedtest_multiple_batch_sizes:
                 self.speedtest_batch_inference_md = speedtest.batch_size_md
                 self.speedtest_chart = speedtest.chart

```
**supervisely/nn/benchmark/semantic_segmentation/visualizer.py**

```diff
@@ -166,6 +166,7 @@ class SemanticSegmentationVisualizer(BaseVisualizer):
             (0, self.header),
             (1, self.overview_md),
             (1, self.key_metrics_md),
+            (0, self.key_metrics_table),
             (0, self.key_metrics_chart),
             (1, self.explore_predictions_md),
             (0, self.explore_predictions_gallery),
```
**supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py**

```diff
@@ -63,29 +63,11 @@ class Evaluator:
         :param boundary_implementation: Choose "exact" for the euclidean pixel distance.
             The Boundary IoU paper uses the L1 distance ("fast").
         """
-        global torch, np, GPU
+        global torch, np, GPU, numpy
         import torch  # pylint: disable=import-error

-
-
-            logger.info("Using GPU for evaluation.")
-            try:
-                # gpu-compatible numpy analogue
-                import cupy as np  # pylint: disable=import-error
-
-                global numpy
-                import numpy as numpy
-            except:
-                logger.warning(
-                    "Failed to import cupy. Use cupy official documentation to install this "
-                    "module: https://docs.cupy.dev/en/stable/install.html"
-                )
-        else:
-            GPU = False
-            import numpy as np
-
-            global numpy
-            numpy = np
+        numpy = np
+        GPU = False

         self.progress = progress or tqdm_sly
         self.class_names = class_names
```
**supervisely/nn/benchmark/visualization/renderer.py**

```diff
@@ -20,6 +20,7 @@ class Renderer:
         layout: BaseWidget,
         base_dir: str = "./output",
         template: str = None,
+        report_name: str = "Model Evaluation Report.lnk",
     ) -> None:
         if template is None:
             template = (
```
**supervisely/nn/benchmark/visualization/renderer.py**

```diff
@@ -28,6 +29,9 @@ class Renderer:
         self.main_template = template
         self.layout = layout
         self.base_dir = base_dir
+        self.report_name = report_name
+        self._report = None
+        self._lnk = None

         if Path(base_dir).exists():
             if not dir_empty(base_dir):
```
**supervisely/nn/benchmark/visualization/renderer.py**

```diff
@@ -81,20 +85,31 @@ class Renderer:
                 change_name_if_conflict=True,
                 progress_size_cb=pbar,
             )
-        src = self.
-
+        src = self._save_report_link(api, team_id, remote_dir)
+        dst = Path(remote_dir).joinpath(self.report_name)
+        self._lnk = api.file.upload(team_id=team_id, src=src, dst=str(dst))
         return remote_dir

-    def
-        report_link = self.
-        pth = Path(self.base_dir).joinpath(
+    def _save_report_link(self, api: Api, team_id: int, remote_dir: str):
+        report_link = self._get_report_path(api, team_id, remote_dir)
+        pth = Path(self.base_dir).joinpath(self.report_name)
         with open(pth, "w") as f:
             f.write(report_link)
         return str(pth)

-    def
-
-
+    def _get_report_link(self, api: Api, team_id: int, remote_dir: str):
+        path = self._get_report_path(api, team_id, remote_dir)
+        return f"{api.server_address}{path}"

-
-
+    def _get_report_path(self, api: Api, team_id: int, remote_dir: str):
+        template_path = Path(remote_dir).joinpath("template.vue")
+        self._report = api.file.get_info_by_path(team_id, str(template_path))
+        return "/model-benchmark?id=" + str(self._report.id)
+
+    @property
+    def report(self):
+        return self._report
+
+    @property
+    def lnk(self):
+        return self._lnk
```
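Per `_get_report_path` / `_get_report_link` above, the uploaded `.lnk` file stores only the server-relative path, and the full URL is the server address plus that path. A minimal sketch (the address and file id are illustrative):

```python
def report_path(file_id: int) -> str:
    # what _save_report_link writes into the .lnk file
    return "/model-benchmark?id=" + str(file_id)


def report_link(server_address: str, file_id: int) -> str:
    # what _get_report_link returns
    return f"{server_address}{report_path(file_id)}"


print(report_link("https://app.supervisely.com", 12345))
# https://app.supervisely.com/model-benchmark?id=12345
```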
**supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py**

```diff
@@ -79,6 +79,7 @@ class GalleryWidget(BaseWidget):
                 column_index=idx % self.columns_number,
                 project_meta=project_metas[idx % self.columns_number],
                 ignore_tags_filtering=skip_tags_filtering[idx % self.columns_number],
+                call_update=idx == len(image_infos) - 1,
             )

     def _get_init_data(self):
```
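`call_update` is now raised only for the last image, so the gallery syncs once per batch instead of once per item. The generic shape of that deferral (a sketch, not the widget's internals):

```python
def add_items(items, update_widget):
    for idx, item in enumerate(items):
        # ...stage `item` into local widget state here...
        if idx == len(items) - 1:  # defer the expensive sync to the last item
            update_widget()


add_items(["a.jpg", "b.jpg", "c.jpg"], lambda: print("gallery updated once"))
```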
**supervisely/nn/inference/inference.py**

```diff
@@ -133,6 +133,7 @@ class Inference:
         if self.INFERENCE_SETTINGS is not None:
             custom_inference_settings = self.INFERENCE_SETTINGS
         else:
+            logger.debug("Custom inference settings are not provided.")
             custom_inference_settings = {}
         if isinstance(custom_inference_settings, str):
             if fs.file_exists(custom_inference_settings):
```
**supervisely/nn/training/gui/gui.py**

```diff
@@ -14,6 +14,7 @@ from supervisely.nn.training.gui.hyperparameters_selector import Hyperparameters
 from supervisely.nn.training.gui.input_selector import InputSelector
 from supervisely.nn.training.gui.model_selector import ModelSelector
 from supervisely.nn.training.gui.train_val_splits_selector import TrainValSplitsSelector
+from supervisely.nn.training.gui.training_artifacts import TrainingArtifacts
 from supervisely.nn.training.gui.training_logs import TrainingLogs
 from supervisely.nn.training.gui.training_process import TrainingProcess
 from supervisely.nn.training.gui.utils import set_stepper_step, wrap_button_click
```
**supervisely/nn/training/gui/gui.py**

```diff
@@ -50,7 +51,9 @@ class TrainGUI:
         if is_production():
             self.task_id = sly_env.task_id()
         else:
-            self.task_id =
+            self.task_id = sly_env.task_id(raise_not_found=False)
+            if self.task_id is None:
+                self.task_id = "debug-session"

         self.framework_name = framework_name
         self.models = models
```
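Outside production the task id is now resolved leniently, with a literal `"debug-session"` placeholder when none is set. Roughly equivalent logic (assuming `sly_env.task_id` reads the `TASK_ID` environment variable, which this diff does not show):

```python
import os

task_id = os.environ.get("TASK_ID")  # sly_env.task_id(raise_not_found=False) analogue
if task_id is None:
    task_id = "debug-session"
print(task_id)
```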
**supervisely/nn/training/gui/gui.py**

```diff
@@ -86,17 +89,22 @@ class TrainGUI:
         # 7. Training logs
         self.training_logs = TrainingLogs(self.app_options)

+        # 8. Training Artifacts
+        self.training_artifacts = TrainingArtifacts(self.app_options)
+
         # Stepper layout
+        self.steps = [
+            self.input_selector.card,
+            self.train_val_splits_selector.card,
+            self.classes_selector.card,
+            self.model_selector.card,
+            self.hyperparameters_selector.card,
+            self.training_process.card,
+            self.training_logs.card,
+            self.training_artifacts.card,
+        ]
         self.stepper = Stepper(
-            widgets=
-                self.input_selector.card,
-                self.train_val_splits_selector.card,
-                self.classes_selector.card,
-                self.model_selector.card,
-                self.hyperparameters_selector.card,
-                self.training_process.card,
-                self.training_logs.card,
-            ],
+            widgets=self.steps,
         )
         # ------------------------------------------------- #

```
**supervisely/nn/training/gui/gui.py**

```diff
@@ -265,6 +273,20 @@ class TrainGUI:

         self.layout: Widget = self.stepper

+    def set_next_step(self):
+        current_step = self.stepper.get_active_step()
+        self.stepper.set_active_step(current_step + 1)
+
+    def set_previous_step(self):
+        current_step = self.stepper.get_active_step()
+        self.stepper.set_active_step(current_step - 1)
+
+    def set_first_step(self):
+        self.stepper.set_active_step(1)
+
+    def set_last_step(self):
+        self.stepper.set_active_step(len(self.steps))
+
     def enable_select_buttons(self):
         """
         Makes all select buttons in the GUI available for interaction.
```
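The new navigation helpers are thin wrappers over the stepper's active-step index, with `set_last_step` driven by the new `self.steps` list. Hypothetical usage against a minimal fake stepper (TrainGUI itself needs a running app):

```python
class FakeStepper:  # stand-in exposing the two methods the helpers rely on
    def __init__(self, n_steps: int):
        self.n_steps, self.active = n_steps, 1

    def get_active_step(self) -> int:
        return self.active

    def set_active_step(self, step: int) -> None:
        self.active = max(1, min(step, self.n_steps))


stepper = FakeStepper(n_steps=8)
stepper.set_active_step(stepper.get_active_step() + 1)  # set_next_step()
print(stepper.get_active_step())  # 2
stepper.set_active_step(8)  # set_last_step() -> len(self.steps) == 8
```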