supervisely 6.73.254-py3-none-any.whl → 6.73.256-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of supervisely might be problematic.

Files changed (61)
  1. supervisely/api/api.py +16 -8
  2. supervisely/api/file_api.py +16 -5
  3. supervisely/api/task_api.py +4 -2
  4. supervisely/app/widgets/field/field.py +10 -7
  5. supervisely/app/widgets/grid_gallery_v2/grid_gallery_v2.py +3 -1
  6. supervisely/io/network_exceptions.py +14 -2
  7. supervisely/nn/benchmark/base_benchmark.py +33 -35
  8. supervisely/nn/benchmark/base_evaluator.py +27 -1
  9. supervisely/nn/benchmark/base_visualizer.py +8 -11
  10. supervisely/nn/benchmark/comparison/base_visualizer.py +147 -0
  11. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/__init__.py +1 -1
  12. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py +5 -7
  13. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +4 -6
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/{explore_predicttions.py → explore_predictions.py} +17 -17
  15. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +3 -5
  16. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +7 -9
  17. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +11 -22
  18. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +3 -5
  19. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +22 -20
  20. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +12 -6
  21. supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py +31 -76
  22. supervisely/nn/benchmark/comparison/model_comparison.py +112 -19
  23. supervisely/nn/benchmark/comparison/semantic_segmentation/__init__.py +0 -0
  24. supervisely/nn/benchmark/comparison/semantic_segmentation/text_templates.py +128 -0
  25. supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/__init__.py +21 -0
  26. supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/classwise_error_analysis.py +68 -0
  27. supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/explore_predictions.py +141 -0
  28. supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/frequently_confused.py +71 -0
  29. supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/iou_eou.py +68 -0
  30. supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/overview.py +223 -0
  31. supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/renormalized_error_ou.py +57 -0
  32. supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/speedtest.py +314 -0
  33. supervisely/nn/benchmark/comparison/semantic_segmentation/visualizer.py +159 -0
  34. supervisely/nn/benchmark/instance_segmentation/evaluator.py +1 -1
  35. supervisely/nn/benchmark/object_detection/evaluator.py +1 -1
  36. supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +1 -3
  37. supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +3 -0
  38. supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +3 -0
  39. supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +1 -1
  40. supervisely/nn/benchmark/object_detection/visualizer.py +5 -10
  41. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +12 -2
  42. supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +8 -9
  43. supervisely/nn/benchmark/semantic_segmentation/text_templates.py +2 -2
  44. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +31 -1
  45. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +1 -3
  46. supervisely/nn/benchmark/semantic_segmentation/visualizer.py +7 -6
  47. supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +3 -21
  48. supervisely/nn/benchmark/visualization/renderer.py +25 -10
  49. supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +1 -0
  50. supervisely/nn/inference/inference.py +1 -0
  51. supervisely/nn/training/gui/gui.py +32 -10
  52. supervisely/nn/training/gui/training_artifacts.py +145 -0
  53. supervisely/nn/training/gui/training_process.py +3 -19
  54. supervisely/nn/training/train_app.py +179 -70
  55. {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/METADATA +1 -1
  56. {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/RECORD +60 -48
  57. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/vis_metric.py +0 -19
  58. {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/LICENSE +0 -0
  59. {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/WHEEL +0 -0
  60. {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/entry_points.txt +0 -0
  61. {supervisely-6.73.254.dist-info → supervisely-6.73.256.dist-info}/top_level.txt +0 -0
supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/speedtest.py (new file)
@@ -0,0 +1,314 @@
+from typing import List, Union
+
+from supervisely.imaging.color import hex2rgb
+from supervisely.nn.benchmark.base_visualizer import BaseVisMetrics
+from supervisely.nn.benchmark.visualization.widgets import (
+    ChartWidget,
+    MarkdownWidget,
+    TableWidget,
+)
+
+
+class Speedtest(BaseVisMetrics):
+
+    def is_empty(self) -> bool:
+        return not any(eval_result.speedtest_info for eval_result in self.eval_results)
+
+    def multiple_batche_sizes(self) -> bool:
+        for eval_result in self.eval_results:
+            if eval_result.speedtest_info is None:
+                continue
+            if len(eval_result.speedtest_info["speedtest"]) > 1:
+                return True
+        return False
+
+    @property
+    def latency(self) -> List[Union[int, str]]:
+        latency = []
+        for eval_result in self.eval_results:
+            if eval_result.speedtest_info is None:
+                latency.append("N/A")
+            else:
+                added = False
+                for test in eval_result.speedtest_info["speedtest"]:
+                    if test["batch_size"] == 1:
+                        latency.append(round(test["benchmark"]["total"], 2))
+                        added = True
+                        break
+                if not added:
+                    latency.append("N/A")
+        return latency
+
+    @property
+    def fps(self) -> List[Union[int, str]]:
+        fps = []
+        for eval_result in self.eval_results:
+            if eval_result.speedtest_info is None:
+                fps.append("N/A")
+            else:
+                added = False
+                for test in eval_result.speedtest_info["speedtest"]:
+                    if test["batch_size"] == 1:
+                        fps.append(round(1000 / test["benchmark"]["total"], 2))
+                        added = True
+                        break
+                if not added:
+                    fps.append("N/A")
+        return fps
+
+    @property
+    def md_intro(self) -> MarkdownWidget:
+        return MarkdownWidget(
+            name="speedtest_intro",
+            title="Inference Speed",
+            text=self.vis_texts.markdown_speedtest_intro,
+        )
+
+    @property
+    def intro_table(self) -> TableWidget:
+        columns = ["Model", "Device", "Hardware", "Runtime"]
+        columns_options = [{"disableSort": True} for _ in columns]
+        content = []
+        for i, eval_result in enumerate(self.eval_results, 1):
+            name = f"[{i}] {eval_result.name}"
+            if eval_result.speedtest_info is None:
+                row = [name, "N/A", "N/A", "N/A"]
+                dct = {
+                    "row": row,
+                    "id": name,
+                    "items": row,
+                }
+                content.append(dct)
+                continue
+            model_info = eval_result.speedtest_info.get("model_info", {})
+            device = model_info.get("device", "N/A")
+            hardware = model_info.get("hardware", "N/A")
+            runtime = model_info.get("runtime", "N/A")
+            row = [name, device, hardware, runtime]
+            dct = {
+                "row": row,
+                "id": name,
+                "items": row,
+            }
+            content.append(dct)
+
+        data = {
+            "columns": columns,
+            "columnsOptions": columns_options,
+            "content": content,
+        }
+        return TableWidget(
+            name="speedtest_intro_table",
+            data=data,
+            show_header_controls=False,
+            fix_columns=1,
+        )
+
+    @property
+    def inference_time_md(self) -> MarkdownWidget:
+        text = self.vis_texts.markdown_speedtest_overview_ms.format(100)
+        return MarkdownWidget(
+            name="inference_time_md",
+            title="Overview",
+            text=text,
+        )
+
+    @property
+    def fps_md(self) -> MarkdownWidget:
+        text = self.vis_texts.markdown_speedtest_overview_fps.format(100)
+        return MarkdownWidget(
+            name="fps_md",
+            title="FPS Table",
+            text=text,
+        )
+
+    @property
+    def fps_table(self) -> TableWidget:
+        data = {}
+        batch_sizes = set()
+        max_fps = 0
+        for i, eval_result in enumerate(self.eval_results, 1):
+            data[i] = {}
+            if eval_result.speedtest_info is None:
+                continue
+            speedtests = eval_result.speedtest_info["speedtest"]
+            for test in speedtests:
+                batch_size = test["batch_size"]
+                fps = round(1000 / test["benchmark"]["total"] * batch_size)
+                batch_sizes.add(batch_size)
+                max_fps = max(max_fps, fps)
+                data[i][batch_size] = fps
+
+        batch_sizes = sorted(batch_sizes)
+        columns = ["Model"]
+        columns_options = [{"disableSort": True}]
+        for batch_size in batch_sizes:
+            columns.append(f"Batch size {batch_size}")
+            columns_options.append(
+                {
+                    "subtitle": "imgs/sec",
+                    "tooltip": "Frames (images) per second",
+                    "postfix": "fps",
+                    # "maxValue": max_fps,
+                }
+            )
+
+        content = []
+        for i, eval_result in enumerate(self.eval_results, 1):
+            name = f"[{i}] {eval_result.name}"
+            row = [name]
+            for batch_size in batch_sizes:
+                if batch_size in data[i]:
+                    row.append(data[i][batch_size])
+                else:
+                    row.append("―")
+            content.append(
+                {
+                    "row": row,
+                    "id": name,
+                    "items": row,
+                }
+            )
+        data = {
+            "columns": columns,
+            "columnsOptions": columns_options,
+            "content": content,
+        }
+        return TableWidget(
+            name="fps_table",
+            data=data,
+            show_header_controls=False,
+            fix_columns=1,
+        )
+
+    @property
+    def inference_time_table(self) -> TableWidget:
+        data = {}
+        batch_sizes = set()
+        for i, eval_result in enumerate(self.eval_results, 1):
+            data[i] = {}
+            if eval_result.speedtest_info is None:
+                continue
+            speedtests = eval_result.speedtest_info["speedtest"]
+            for test in speedtests:
+                batch_size = test["batch_size"]
+                ms = round(test["benchmark"]["total"], 2)
+                batch_sizes.add(batch_size)
+                data[i][batch_size] = ms
+
+        batch_sizes = sorted(batch_sizes)
+        columns = ["Model"]
+        columns_options = [{"disableSort": True}]
+        for batch_size in batch_sizes:
+            columns.extend([f"Batch size {batch_size}"])
+            columns_options.extend(
+                [
+                    {"subtitle": "ms", "tooltip": "Milliseconds for batch images", "postfix": "ms"},
+                ]
+            )
+
+        content = []
+        for i, eval_result in enumerate(self.eval_results, 1):
+            name = f"[{i}] {eval_result.name}"
+            row = [name]
+            for batch_size in batch_sizes:
+                if batch_size in data[i]:
+                    row.append(data[i][batch_size])
+                else:
+                    row.append("―")
+            content.append(
+                {
+                    "row": row,
+                    "id": name,
+                    "items": row,
+                }
+            )
+
+        data = {
+            "columns": columns,
+            "columnsOptions": columns_options,
+            "content": content,
+        }
+        return TableWidget(
+            name="inference_time_md",
+            data=data,
+            show_header_controls=False,
+            fix_columns=1,
+        )
+
+    @property
+    def batch_inference_md(self):
+        return MarkdownWidget(
+            name="batch_inference",
+            title="Batch Inference",
+            text=self.vis_texts.markdown_batch_inference,
+        )
+
+    @property
+    def chart(self) -> ChartWidget:
+        return ChartWidget(name="speed_charts", figure=self.get_figure())
+
+    def get_figure(self):  # -> Optional[go.Figure]
+        import plotly.graph_objects as go  # pylint: disable=import-error
+        from plotly.subplots import make_subplots  # pylint: disable=import-error
+
+        fig = make_subplots(cols=2)
+
+        for idx, eval_result in enumerate(self.eval_results, 1):
+            if eval_result.speedtest_info is None:
+                continue
+            temp_res = {}
+            for test in eval_result.speedtest_info["speedtest"]:
+                batch_size = test["batch_size"]
+
+                std = test["benchmark_std"]["total"]
+                ms = test["benchmark"]["total"]
+                fps = round(1000 / test["benchmark"]["total"] * batch_size)
+
+                ms_line = temp_res.setdefault("ms", {})
+                fps_line = temp_res.setdefault("fps", {})
+                ms_std_line = temp_res.setdefault("ms_std", {})
+
+                ms_line[batch_size] = ms
+                fps_line[batch_size] = fps
+                ms_std_line[batch_size] = round(std, 2)
+
+            error_color = "rgba(" + ",".join(map(str, hex2rgb(eval_result.color))) + ", 0.5)"
+            fig.add_trace(
+                go.Scatter(
+                    x=list(temp_res["ms"].keys()),
+                    y=list(temp_res["ms"].values()),
+                    name=f"[{idx}] {eval_result.name} (ms)",
+                    line=dict(color=eval_result.color),
+                    customdata=list(temp_res["ms_std"].values()),
+                    error_y=dict(
+                        type="data",
+                        array=list(temp_res["ms_std"].values()),
+                        visible=True,
+                        color=error_color,
+                    ),
+                    hovertemplate="Batch Size: %{x}<br>Time: %{y:.2f} ms<br> Standard deviation: %{customdata:.2f} ms<extra></extra>",
+                ),
+                col=1,
+                row=1,
+            )
+            fig.add_trace(
+                go.Scatter(
+                    x=list(temp_res["fps"].keys()),
+                    y=list(temp_res["fps"].values()),
+                    name=f"[{idx}] {eval_result.name} (fps)",
+                    line=dict(color=eval_result.color),
+                    hovertemplate="Batch Size: %{x}<br>FPS: %{y:.2f}<extra></extra>",  # <br> Standard deviation: %{customdata:.2f}<extra></extra>",
+                ),
+                col=2,
+                row=1,
+            )
+
+        fig.update_xaxes(title_text="Batch size", col=1, dtick=1)
+        fig.update_xaxes(title_text="Batch size", col=2, dtick=1)
+
+        fig.update_yaxes(title_text="Time (ms)", col=1)
+        fig.update_yaxes(title_text="FPS", col=2)
+        fig.update_layout(height=400)
+
+        return fig
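
Note: the Speedtest metrics above read eval_result.speedtest_info as a plain dict. The sketch below shows the shape implied by the key accesses in the new file; the field names come from the diff, while the concrete values are illustrative placeholders only.

    # Shape of speedtest_info as implied by the accesses above (values are made up).
    speedtest_info = {
        "model_info": {
            "device": "cuda:0",
            "hardware": "NVIDIA RTX 4090",
            "runtime": "PyTorch",
        },
        "speedtest": [
            {
                "batch_size": 1,                  # batch size 1 feeds the latency / fps properties
                "benchmark": {"total": 12.5},     # mean time per batch, milliseconds
                "benchmark_std": {"total": 0.8},  # standard deviation, milliseconds
            },
            {
                "batch_size": 8,
                "benchmark": {"total": 64.0},
                "benchmark_std": {"total": 2.1},
            },
        ],
    }

With these numbers the class would report a latency of 12.5 ms and 1000 / 12.5 = 80 FPS for batch size 1, and 1000 / 64.0 * 8 = 125 FPS for batch size 8 in the FPS table.
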
supervisely/nn/benchmark/comparison/semantic_segmentation/visualizer.py (new file)
@@ -0,0 +1,159 @@
+from typing import List
+
+import supervisely.nn.benchmark.comparison.semantic_segmentation.text_templates as texts
+from supervisely.nn.benchmark.comparison.base_visualizer import BaseComparisonVisualizer
+from supervisely.nn.benchmark.comparison.semantic_segmentation.vis_metrics import (
+    ClasswiseErrorAnalysis,
+    ExplorePredictions,
+    FrequentlyConfused,
+    IntersectionErrorOverUnion,
+    Overview,
+    RenormalizedErrorOverUnion,
+    Speedtest,
+)
+from supervisely.nn.benchmark.semantic_segmentation.evaluator import (
+    SemanticSegmentationEvalResult,
+)
+from supervisely.nn.benchmark.visualization.widgets import (
+    ContainerWidget,
+    MarkdownWidget,
+    SidebarWidget,
+)
+
+
+class SemanticSegmentationComparisonVisualizer(BaseComparisonVisualizer):
+    vis_texts = texts
+    ann_opacity = 0.7
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.eval_results: List[SemanticSegmentationEvalResult]
+
+    def _create_widgets(self):
+        # Modal Gellery
+        self.diff_modal = self._create_diff_modal_table()
+        self.explore_modal = self._create_explore_modal_table(
+            click_gallery_id=self.diff_modal.id, hover_text="Compare with GT"
+        )
+
+        # Notifcation
+        self.clickable_label = self._create_clickable_label()
+
+        # Speedtest init here for overview
+        speedtest = Speedtest(self.vis_texts, self.comparison.eval_results)
+
+        # Overview
+        overview = Overview(self.vis_texts, self.comparison.eval_results)
+        overview.team_id = self.comparison.team_id
+        self.header = self._create_header()
+        self.overviews = self._create_overviews(overview, grid_cols=2)
+        self.overview_md = overview.overview_md
+        self.key_metrics_md = self._create_key_metrics()
+        self.key_metrics_table = overview.get_table_widget(
+            latency=speedtest.latency, fps=speedtest.fps
+        )
+        self.overview_chart = overview.chart_widget
+
+        # Explore Predictions
+        columns_number = len(self.comparison.eval_results) + 1  # +1 for GT
+        self.explore_predictions_modal_gallery = self._create_explore_modal_table(columns_number)
+        explore_predictions = ExplorePredictions(
+            self.vis_texts,
+            self.comparison.eval_results,
+            explore_modal_table=self.explore_predictions_modal_gallery,
+        )
+        self.explore_predictions_md = explore_predictions.difference_predictions_md
+        self.explore_predictions_gallery = explore_predictions.explore_gallery
+
+        # IntersectionErrorOverUnion
+        iou_eou = IntersectionErrorOverUnion(self.vis_texts, self.comparison.eval_results)
+        self.iou_eou_md = iou_eou.md
+        self.iou_eou_chart = iou_eou.chart
+
+        # RenormalizedErrorOverUnion
+        reou = RenormalizedErrorOverUnion(self.vis_texts, self.comparison.eval_results)
+        self.reou_md = reou.md
+        self.reou_chart = reou.chart
+
+        # ClasswiseErrorAnalysis
+        classwise_ea = ClasswiseErrorAnalysis(self.vis_texts, self.comparison.eval_results)
+        self.classwise_ea_md = classwise_ea.md
+        self.classwise_ea_chart = classwise_ea.chart
+
+        # FrequentlyConfused
+        frequently_confused = FrequentlyConfused(self.vis_texts, self.comparison.eval_results)
+        self.frequently_confused_md = frequently_confused.md
+        self.frequently_confused_chart = frequently_confused.chart
+
+        # # SpeedTest
+        self.speedtest_present = not speedtest.is_empty()
+        self.speedtest_multiple_batch_sizes = False
+
+        if self.speedtest_present:
+            self.speedtest_md_intro = speedtest.md_intro
+            self.speedtest_intro_table = speedtest.intro_table
+            self.speed_inference_time_md = speedtest.inference_time_md
+            self.speed_inference_time_table = speedtest.inference_time_table
+            self.speed_fps_md = speedtest.fps_md
+            self.speed_fps_table = speedtest.fps_table
+            self.speedtest_multiple_batch_sizes = speedtest.multiple_batche_sizes()
+            if self.speedtest_multiple_batch_sizes:
+                self.speed_batch_inference_md = speedtest.batch_inference_md
+                self.speed_chart = speedtest.chart
+
+    def _create_layout(self):
+        is_anchors_widgets = [
+            # Overview
+            (0, self.header),
+            (1, self.overview_md),
+            (0, self.overviews),
+            (1, self.key_metrics_md),
+            (0, self.key_metrics_table),
+            (0, self.overview_chart),
+            # Explore Predictions
+            (1, self.explore_predictions_md),
+            (0, self.explore_predictions_gallery),
+            # IntersectionErrorOverUnion
+            (1, self.iou_eou_md),
+            (0, self.iou_eou_chart),
+            # RenormalizedErrorOverUnion
+            (1, self.reou_md),
+            (0, self.reou_chart),
+            # ClasswiseErrorAnalysis
+            (1, self.classwise_ea_md),
+            (0, self.classwise_ea_chart),
+            # FrequentlyConfused
+            (1, self.frequently_confused_md),
+            (0, self.frequently_confused_chart),
+        ]
+        if self.speedtest_present:
+            is_anchors_widgets.extend(
+                [
+                    # SpeedTest
+                    (1, self.speedtest_md_intro),
+                    (0, self.speedtest_intro_table),
+                    (0, self.speed_inference_time_md),
+                    (0, self.speed_inference_time_table),
+                    (0, self.speed_fps_md),
+                    (0, self.speed_fps_table),
+                ]
+            )
+            if self.speedtest_multiple_batch_sizes:
+                is_anchors_widgets.append((0, self.speed_batch_inference_md))
+                is_anchors_widgets.append((0, self.speed_chart))
+        anchors = []
+        for is_anchor, widget in is_anchors_widgets:
+            if is_anchor:
+                anchors.append(widget.id)
+
+        sidebar = SidebarWidget(widgets=[i[1] for i in is_anchors_widgets], anchors=anchors)
+        layout = ContainerWidget(
+            widgets=[sidebar, self.explore_modal, self.explore_predictions_modal_gallery],
+            name="main_container",
+        )
+        return layout
+
+    def _create_key_metrics(self) -> MarkdownWidget:
+        return MarkdownWidget(
+            "markdown_key_metrics", "Key Metrics", text=self.vis_texts.markdown_key_metrics
+        )
@@ -39,7 +39,7 @@ class InstanceSegmentationEvaluator(ObjectDetectionEvaluator):
         except AssertionError as e:
             raise ValueError(
                 f"{e}. Please make sure that your GT and DT projects are correct. "
-                "If GT project has nested datasets and DT project was crated with NN app, "
+                "If GT project has nested datasets and DT project was created with NN app, "
                 "try to use newer version of NN app."
             )
 
@@ -104,7 +104,7 @@ class ObjectDetectionEvaluator(BaseEvaluator):
         except AssertionError as e:
             raise ValueError(
                 f"{e}. Please make sure that your GT and DT projects are correct. "
-                "If GT project has nested datasets and DT project was crated with NN app, "
+                "If GT project has nested datasets and DT project was created with NN app, "
                 "try to use newer version of NN app."
             )
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
@@ -24,9 +24,7 @@ class Overview(DetectionVisMetric):
         link_text = link_text.replace("_", "\_")
 
         model_name = self.eval_result.inference_info.get("model_name") or "Custom"
-        checkpoint_name = self.eval_result.inference_info.get("deploy_params", {}).get(
-            "checkpoint_name", ""
-        )
+        checkpoint_name = self.eval_result.checkpoint_name
 
         # Note about validation dataset
         classes_str, note_about_images, starter_app_info = self._get_overview_info()
@@ -69,4 +69,7 @@ class Precision(DetectionVisMetric):
         )
         fig.update_xaxes(title_text="Class")
         fig.update_yaxes(title_text="Precision", range=[0, 1])
+        fig.update_layout(
+            width=700 if len(sorted_by_precision) < 10 else None,
+        )
         return fig
@@ -68,4 +68,7 @@ class Recall(DetectionVisMetric):
         )
         fig.update_xaxes(title_text="Class")
         fig.update_yaxes(title_text="Recall", range=[0, 1])
+        fig.update_layout(
+            width=700 if len(sorted_by_f1) < 10 else None,
+        )
         return fig
@@ -50,7 +50,7 @@ class RecallVsPrecision(DetectionVisMetric):
                 marker=dict(color=orange_color),
             )
         )
-        fig.update_layout(barmode="group")
+        fig.update_layout(barmode="group", width=800 if len(sorted_by_f1) < 10 else None)
         fig.update_xaxes(title_text="Class")
         fig.update_yaxes(title_text="Value", range=[0, 1])
         return fig
@@ -56,21 +56,17 @@ class ObjectDetectionVisualizer(BaseVisualizer):
         self._widgets = False
         self.ann_opacity = 0.4
 
-        diff_project_info, diff_dataset_infos, existed = self._get_or_create_diff_project()
+        diff_project_info, diff_dataset_infos, _ = self._get_or_create_diff_project()
         self.eval_result.diff_project_info = diff_project_info
         self.eval_result.diff_dataset_infos = diff_dataset_infos
         self.eval_result.matched_pair_data = {}
 
         self.gt_project_path = str(Path(self.workdir).parent / "gt_project")
         self.pred_project_path = str(Path(self.workdir).parent / "pred_project")
-        if not existed:
-            self.update_diff_annotations()
-        else:
-            self._init_match_data()
+        self.update_diff_annotations()
 
         # set filtered project meta
         self.eval_result.filtered_project_meta = self._get_filtered_project_meta(self.eval_result)
-
         self._get_sample_data_for_gallery()
 
     @property
@@ -222,10 +218,9 @@ class ObjectDetectionVisualizer(BaseVisualizer):
 
         # Speedtest init here for overview
         speedtest = Speedtest(self.vis_texts, self.eval_result)
-        self.speedtest_present = False
+        self.speedtest_present = not speedtest.is_empty()
         self.speedtest_batch_sizes_cnt = speedtest.num_batche_sizes
-        if not speedtest.is_empty():
-            self.speedtest_present = True
+        if self.speedtest_present:
             self.speedtest_md_intro = speedtest.intro_md
             self.speedtest_table_md = speedtest.table_md
             self.speedtest_table = speedtest.table
@@ -393,7 +388,7 @@ class ObjectDetectionVisualizer(BaseVisualizer):
 
         pred_tag_list = []
         with self.pbar(
-            message="Visualizations: Creating diff_project", total=pred_project.total_items
+            message="Visualizations: Creating difference project", total=pred_project.total_items
         ) as progress:
             logger.debug(
                 "Creating diff project data",
@@ -97,7 +97,13 @@ class SemanticSegmentationEvaluator(BaseEvaluator):
         meta_path = Path(project_path) / "meta.json"
         meta = ProjectMeta.from_json(load_json_file(meta_path))
 
-        palette = [obj.color for obj in meta.obj_classes if obj.name in self.classes_whitelist]
+        palette = []
+        for cls_name in self.classes_whitelist:
+            obj_cls = meta.get_obj_class(cls_name)
+            if obj_cls is None:
+                palette.append((0, 0, 0))
+            else:
+                palette.append(obj_cls.color)
 
         return palette
 
@@ -123,7 +129,11 @@ class SemanticSegmentationEvaluator(BaseEvaluator):
                 continue
 
             palette = self._get_palette(src_dir)
-            bg_color = palette[self.classes_whitelist.index(self.bg_cls_name)]
+            bg_cls_idx = self.classes_whitelist.index(self.bg_cls_name)
+            try:
+                bg_color = palette[bg_cls_idx]
+            except IndexError:
+                bg_color = (0, 0, 0)
             output_dir.mkdir(parents=True)
             temp_seg_dir = src_dir + "_temp"
             if not os.path.exists(temp_seg_dir):
supervisely/nn/benchmark/semantic_segmentation/metric_provider.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict
+from typing import Any, Dict, Optional
 
 import numpy as np
 
@@ -65,7 +65,7 @@ class MetricProvider:
 
         # frequently confused classes
         self.frequently_confused = self.get_frequently_confused(
-            eval_data["confusion_matrix"].copy()
+            eval_data["confusion_matrix"].copy(), n_pairs=20
         )
 
     def json_metrics(self):
@@ -114,18 +114,17 @@ class MetricProvider:
         confusion_matrix = confusion_matrix[::-1]
         return confusion_matrix, class_names
 
-    def get_frequently_confused(self, confusion_matrix: np.ndarray):
-        n_pairs = 20
+    def get_frequently_confused(self, confusion_matrix: np.ndarray, n_pairs: Optional[int] = None):
 
-        non_diagonal_indexes = {}
+        non_diagonal_ids = {}
         for i, idx in enumerate(np.ndindex(confusion_matrix.shape)):
             if idx[0] != idx[1]:
-                non_diagonal_indexes[i] = idx
+                non_diagonal_ids[i] = idx
 
         indexes_1d = np.argsort(confusion_matrix, axis=None)
-        indexes_2d = [
-            non_diagonal_indexes[idx] for idx in indexes_1d if idx in non_diagonal_indexes
-        ][-n_pairs:]
+        indexes_2d = [non_diagonal_ids[idx] for idx in indexes_1d if idx in non_diagonal_ids]
+        if n_pairs is not None:
+            indexes_2d = indexes_2d[:n_pairs]
         indexes_2d = np.asarray(indexes_2d[::-1])
 
         rows = indexes_2d[:, 0]
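
The get_frequently_confused change above keeps the same selection idea (rank off-diagonal confusion-matrix cells by how often they occur) while making the pair count optional. A self-contained sketch of that kind of selection, written independently of the library code with a made-up matrix, might look like this:

    import numpy as np

    # Toy confusion matrix: rows are ground-truth classes, columns are predicted classes.
    cm = np.array([
        [50,  3, 10],
        [ 2, 40,  1],
        [ 8,  0, 30],
    ])

    # Rank the off-diagonal cells (actual confusions) from most to least frequent.
    off_diag = ~np.eye(cm.shape[0], dtype=bool)
    pairs = np.argwhere(off_diag)              # (gt_class, pred_class) index pairs
    order = np.argsort(cm[off_diag])[::-1]     # indices of confusions, most frequent first

    n_pairs = 2
    top_pairs = pairs[order][:n_pairs]
    print(top_pairs.tolist())  # [[0, 2], [2, 0]]: classes 0 and 2 are confused most often
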
supervisely/nn/benchmark/semantic_segmentation/text_templates.py
@@ -40,7 +40,7 @@ We provide a comprehensive model performance analysis using a set of metrics, in
 
 - **Pixel accuracy**: reflects the percent of image pixels which were correctly classified.
 - **Precision**: reflects the number of correctly predicted positive segmentations divided by the total number of predicted positive segmentations.
-- **Recall8**: reflects the number of correctly predicted positive segmentations divided by the number of all samples that should have been segmented as positive.
+- **Recall**: reflects the number of correctly predicted positive segmentations divided by the number of all samples that should have been segmented as positive.
 - **F1-score**: reflects the tradeoff between precision and recall. It is equivalent to the Dice coefficient and calculated as a harmonic mean of precision and recall.
 - **Intersection over union (IoU, also known as the Jaccard index)**: measures the overlap between ground truth mask and predicted mask. It is calculated as the ratio of the intersection of the two masks areas to their combined areas.
 - **Boundary intersection over union**: a segmentation consistency measure that first computes the sets of ground truth and predicted masks pixels that are located within the distance d from each contour and then computes intersection over union of these two sets. Pixel distance parameter d (pixel width of the boundary region) controls the sensitivity of the metric, it is usually set as 2% of the image diagonal for normal resolution images and 0.5% of the image diagonal for high resolution images.
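
As a quick reference for the metric definitions listed above, a minimal sketch (not the evaluator's actual implementation) of precision, recall, F1/Dice, IoU and pixel accuracy for one pair of binary masks could be:

    import numpy as np

    def binary_mask_metrics(gt: np.ndarray, pred: np.ndarray) -> dict:
        # gt and pred are boolean masks of the same shape for a single class.
        tp = np.logical_and(gt, pred).sum()
        fp = np.logical_and(~gt, pred).sum()
        fn = np.logical_and(gt, ~pred).sum()

        precision = tp / (tp + fp) if tp + fp else 0.0
        recall = tp / (tp + fn) if tp + fn else 0.0
        f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0  # = Dice
        iou = tp / (tp + fp + fn) if tp + fp + fn else 0.0  # Jaccard index
        pixel_accuracy = (gt == pred).mean()  # share of correctly classified pixels

        return {
            "precision": precision,
            "recall": recall,
            "f1": f1,
            "iou": iou,
            "pixel_accuracy": pixel_accuracy,
        }
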
@@ -72,7 +72,7 @@ The pie chart below demonstrates what the model lacked in order to show the perf
 
 markdown_renormalized_error_ou = """## Renormalized Error Over Union
 
-The pie chart below is dedicated to decomposition of postprocessed variant of error over union which takes into consideration cause and effect relationships between different types of segmentation errors. Error over union decomposition has its own pitfalls. It is important to understand that models which tend to produce segment errors (when entire segments are mispredicted and there is no intersection between ground truth and predicted mask) will face less occasions to produce boundary and extent errors - as a result, boundary and extent error over union values will be underestimated.
+The chart below is dedicated to decomposition of postprocessed variant of error over union which takes into consideration cause and effect relationships between different types of segmentation errors. Error over union decomposition has its own pitfalls. It is important to understand that models which tend to produce segment errors (when entire segments are mispredicted and there is no intersection between ground truth and predicted mask) will face less occasions to produce boundary and extent errors - as a result, boundary and extent error over union values will be underestimated.
 
 In terms of localization, segment error is more fundamental than extent, while extent error is more fundamental than boundary. In order to overcome this problem, renormalized error over union proposes a slightly different calculation method - by removing more fundamental errors from the denominator - read more in our <a href="{}" target="_blank">technical report</a>
 """.format(