supervisely 6.73.214-py3-none-any.whl → 6.73.216-py3-none-any.whl

This diff shows the content of publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between the two versions as published.

Potentially problematic release: this version of supervisely might be problematic.

Files changed (52)
  1. supervisely/app/fastapi/templating.py +1 -1
  2. supervisely/app/widgets/report_thumbnail/report_thumbnail.py +17 -5
  3. supervisely/app/widgets/team_files_selector/team_files_selector.py +3 -0
  4. supervisely/nn/artifacts/__init__.py +1 -0
  5. supervisely/nn/artifacts/rtdetr.py +32 -0
  6. supervisely/nn/benchmark/comparison/__init__.py +0 -0
  7. supervisely/nn/benchmark/comparison/detection_visualization/__init__.py +0 -0
  8. supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +437 -0
  9. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/__init__.py +27 -0
  10. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py +125 -0
  11. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +224 -0
  12. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +112 -0
  13. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +161 -0
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +336 -0
  15. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +249 -0
  16. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +142 -0
  17. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +300 -0
  18. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +308 -0
  19. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/vis_metric.py +19 -0
  20. supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py +298 -0
  21. supervisely/nn/benchmark/comparison/model_comparison.py +84 -0
  22. supervisely/nn/benchmark/evaluation/coco/metric_provider.py +9 -7
  23. supervisely/nn/benchmark/visualization/evaluation_result.py +266 -0
  24. supervisely/nn/benchmark/visualization/renderer.py +100 -0
  25. supervisely/nn/benchmark/visualization/report_template.html +46 -0
  26. supervisely/nn/benchmark/visualization/visualizer.py +1 -1
  27. supervisely/nn/benchmark/visualization/widgets/__init__.py +17 -0
  28. supervisely/nn/benchmark/visualization/widgets/chart/__init__.py +0 -0
  29. supervisely/nn/benchmark/visualization/widgets/chart/chart.py +72 -0
  30. supervisely/nn/benchmark/visualization/widgets/chart/template.html +16 -0
  31. supervisely/nn/benchmark/visualization/widgets/collapse/__init__.py +0 -0
  32. supervisely/nn/benchmark/visualization/widgets/collapse/collapse.py +33 -0
  33. supervisely/nn/benchmark/visualization/widgets/container/__init__.py +0 -0
  34. supervisely/nn/benchmark/visualization/widgets/container/container.py +54 -0
  35. supervisely/nn/benchmark/visualization/widgets/gallery/__init__.py +0 -0
  36. supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +125 -0
  37. supervisely/nn/benchmark/visualization/widgets/gallery/template.html +49 -0
  38. supervisely/nn/benchmark/visualization/widgets/markdown/__init__.py +0 -0
  39. supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +53 -0
  40. supervisely/nn/benchmark/visualization/widgets/notification/__init__.py +0 -0
  41. supervisely/nn/benchmark/visualization/widgets/notification/notification.py +38 -0
  42. supervisely/nn/benchmark/visualization/widgets/sidebar/__init__.py +0 -0
  43. supervisely/nn/benchmark/visualization/widgets/sidebar/sidebar.py +67 -0
  44. supervisely/nn/benchmark/visualization/widgets/table/__init__.py +0 -0
  45. supervisely/nn/benchmark/visualization/widgets/table/table.py +116 -0
  46. supervisely/nn/benchmark/visualization/widgets/widget.py +22 -0
  47. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/METADATA +1 -1
  48. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/RECORD +52 -12
  49. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/LICENSE +0 -0
  50. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/WHEEL +0 -0
  51. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/entry_points.txt +0 -0
  52. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/top_level.txt +0 -0
supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py
@@ -0,0 +1,308 @@
+ from typing import List, Union
+
+ from supervisely.imaging.color import hex2rgb
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.vis_metric import (
+     BaseVisMetric,
+ )
+ from supervisely.nn.benchmark.visualization.widgets import (
+     ChartWidget,
+     MarkdownWidget,
+     TableWidget,
+ )
+
+
+ class Speedtest(BaseVisMetric):
+
+     def is_empty(self) -> bool:
+         return not any(eval_result.speedtest_info for eval_result in self.eval_results)
+
+     @property
+     def latency(self) -> List[Union[float, str]]:
+         latency = []
+         for eval_result in self.eval_results:
+             if eval_result.speedtest_info is None:
+                 latency.append("N/A")
+             else:
+                 added = False
+                 for test in eval_result.speedtest_info["speedtest"]:
+                     if test["batch_size"] == 1:
+                         latency.append(round(test["benchmark"]["total"], 2))
+                         added = True
+                         break
+                 if not added:
+                     latency.append("N/A")
+         return latency
+
+     @property
+     def fps(self) -> List[Union[float, str]]:
+         fps = []
+         for eval_result in self.eval_results:
+             if eval_result.speedtest_info is None:
+                 fps.append("N/A")
+             else:
+                 added = False
+                 for test in eval_result.speedtest_info["speedtest"]:
+                     if test["batch_size"] == 1:
+                         fps.append(round(1000 / test["benchmark"]["total"], 2))
+                         added = True
+                         break
+                 if not added:
+                     fps.append("N/A")
+         return fps
+
+     @property
+     def md_intro(self) -> MarkdownWidget:
+         return MarkdownWidget(
+             name="speedtest_intro",
+             title="Inference Speed",
+             text=self.vis_texts.markdown_speedtest_intro,
+         )
+
+     @property
+     def intro_table(self) -> TableWidget:
+         columns = ["Model", "Device", "Hardware", "Runtime"]
+         columns_options = [{"disableSort": True} for _ in columns]
+         content = []
+         for i, eval_result in enumerate(self.eval_results, 1):
+             name = f"[{i}] {eval_result.name}"
+             if eval_result.speedtest_info is None:
+                 row = [name, "N/A", "N/A", "N/A"]
+                 dct = {
+                     "row": row,
+                     "id": name,
+                     "items": row,
+                 }
+                 content.append(dct)
+                 continue
+             model_info = eval_result.speedtest_info.get("model_info", {})
+             device = model_info.get("device", "N/A")
+             hardware = model_info.get("hardware", "N/A")
+             runtime = model_info.get("runtime", "N/A")
+             row = [name, device, hardware, runtime]
+             dct = {
+                 "row": row,
+                 "id": name,
+                 "items": row,
+             }
+             content.append(dct)
+
+         data = {
+             "columns": columns,
+             "columnsOptions": columns_options,
+             "content": content,
+         }
+         return TableWidget(
+             name="speedtest_intro_table",
+             data=data,
+             show_header_controls=False,
+             fix_columns=1,
+         )
+
+     @property
+     def inference_time_md(self) -> MarkdownWidget:
+         text = self.vis_texts.markdown_speedtest_overview_ms.format(100)
+         return MarkdownWidget(
+             name="inference_time_md",
+             title="Overview",
+             text=text,
+         )
+
+     @property
+     def fps_md(self) -> MarkdownWidget:
+         text = self.vis_texts.markdown_speedtest_overview_fps.format(100)
+         return MarkdownWidget(
+             name="fps_md",
+             title="FPS Table",
+             text=text,
+         )
+
+     @property
+     def fps_table(self) -> TableWidget:
+         data = {}
+         batch_sizes = set()
+         max_fps = 0
+         for i, eval_result in enumerate(self.eval_results, 1):
+             data[i] = {}
+             if eval_result.speedtest_info is None:
+                 continue
+             speedtests = eval_result.speedtest_info["speedtest"]
+             for test in speedtests:
+                 batch_size = test["batch_size"]
+                 fps = round(1000 / test["benchmark"]["total"] * batch_size)
+                 batch_sizes.add(batch_size)
+                 max_fps = max(max_fps, fps)
+                 data[i][batch_size] = fps
+
+         batch_sizes = sorted(batch_sizes)
+         columns = ["Model"]
+         columns_options = [{"disableSort": True}]
+         for batch_size in batch_sizes:
+             columns.append(f"Batch size {batch_size}")
+             columns_options.append(
+                 {
+                     "subtitle": "imgs/sec",
+                     "tooltip": "Frames (images) per second",
+                     "postfix": "fps",
+                     # "maxValue": max_fps,
+                 }
+             )
+
+         content = []
+         for i, eval_result in enumerate(self.eval_results, 1):
+             name = f"[{i}] {eval_result.name}"
+             row = [name]
+             for batch_size in batch_sizes:
+                 if batch_size in data[i]:
+                     row.append(data[i][batch_size])
+                 else:
+                     row.append("―")
+             content.append(
+                 {
+                     "row": row,
+                     "id": name,
+                     "items": row,
+                 }
+             )
+         data = {
+             "columns": columns,
+             "columnsOptions": columns_options,
+             "content": content,
+         }
+         return TableWidget(
+             name="fps_table",
+             data=data,
+             show_header_controls=False,
+             fix_columns=1,
+         )
+
+     @property
+     def inference_time_table(self) -> TableWidget:
+         data = {}
+         batch_sizes = set()
+         for i, eval_result in enumerate(self.eval_results, 1):
+             data[i] = {}
+             if eval_result.speedtest_info is None:
+                 continue
+             speedtests = eval_result.speedtest_info["speedtest"]
+             for test in speedtests:
+                 batch_size = test["batch_size"]
+                 ms = round(test["benchmark"]["total"], 2)
+                 batch_sizes.add(batch_size)
+                 data[i][batch_size] = ms
+
+         batch_sizes = sorted(batch_sizes)
+         columns = ["Model"]
+         columns_options = [{"disableSort": True}]
+         for batch_size in batch_sizes:
+             columns.extend([f"Batch size {batch_size}"])
+             columns_options.extend(
+                 [
+                     {"subtitle": "ms", "tooltip": "Milliseconds for batch images", "postfix": "ms"},
+                 ]
+             )
+
+         content = []
+         for i, eval_result in enumerate(self.eval_results, 1):
+             name = f"[{i}] {eval_result.name}"
+             row = [name]
+             for batch_size in batch_sizes:
+                 if batch_size in data[i]:
+                     row.append(data[i][batch_size])
+                 else:
+                     row.append("―")
+             content.append(
+                 {
+                     "row": row,
+                     "id": name,
+                     "items": row,
+                 }
+             )
+
+         data = {
+             "columns": columns,
+             "columnsOptions": columns_options,
+             "content": content,
+         }
+         return TableWidget(
+             name="inference_time_table",
+             data=data,
+             show_header_controls=False,
+             fix_columns=1,
+         )
+
+     @property
+     def batch_inference_md(self):
+         return MarkdownWidget(
+             name="batch_inference",
+             title="Batch Inference",
+             text=self.vis_texts.markdown_batch_inference,
+         )
+
+     @property
+     def chart(self) -> ChartWidget:
+         return ChartWidget(name="speed_charts", figure=self.get_figure())
+
+     def get_figure(self):  # -> Optional[go.Figure]
+         import plotly.graph_objects as go  # pylint: disable=import-error
+         from plotly.subplots import make_subplots  # pylint: disable=import-error
+
+         fig = make_subplots(cols=2)
+
+         for eval_result in self.eval_results:
+             if eval_result.speedtest_info is None:
+                 continue
+             temp_res = {}
+             for test in eval_result.speedtest_info["speedtest"]:
+                 batch_size = test["batch_size"]
+
+                 std = test["benchmark_std"]["total"]
+                 ms = test["benchmark"]["total"]
+                 fps = round(1000 / test["benchmark"]["total"] * batch_size)
+
+                 ms_line = temp_res.setdefault("ms", {})
+                 fps_line = temp_res.setdefault("fps", {})
+                 ms_std_line = temp_res.setdefault("ms_std", {})
+
+                 ms_line[batch_size] = ms
+                 fps_line[batch_size] = fps
+                 ms_std_line[batch_size] = round(std, 2)
+
+             error_color = "rgba(" + ",".join(map(str, hex2rgb(eval_result.color))) + ", 0.5)"
+             fig.add_trace(
+                 go.Scatter(
+                     x=list(temp_res["ms"].keys()),
+                     y=list(temp_res["ms"].values()),
+                     name="Inference time (ms)",
+                     line=dict(color=eval_result.color),
+                     customdata=list(temp_res["ms_std"].values()),
+                     error_y=dict(
+                         type="data",
+                         array=list(temp_res["ms_std"].values()),
+                         visible=True,
+                         color=error_color,
+                     ),
+                     hovertemplate="Batch Size: %{x}<br>Time: %{y:.2f} ms<br> Standard deviation: %{customdata:.2f} ms<extra></extra>",
+                 ),
+                 col=1,
+                 row=1,
+             )
+             fig.add_trace(
+                 go.Scatter(
+                     x=list(temp_res["fps"].keys()),
+                     y=list(temp_res["fps"].values()),
+                     name="FPS",
+                     line=dict(color=eval_result.color),
+                     hovertemplate="Batch Size: %{x}<br>FPS: %{y:.2f}<extra></extra>",  # <br> Standard deviation: %{customdata:.2f}<extra></extra>",
+                 ),
+                 col=2,
+                 row=1,
+             )
+
+         fig.update_xaxes(title_text="Batch size", col=1, dtick=1)
+         fig.update_xaxes(title_text="Batch size", col=2, dtick=1)
+
+         fig.update_yaxes(title_text="Time (ms)", col=1)
+         fig.update_yaxes(title_text="FPS", col=2)
+         fig.update_layout(height=400)
+
+         return fig
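For reference, the throughput numbers in fps_table and get_figure are derived from the per-batch benchmark totals: a batch that takes total milliseconds yields 1000 / total * batch_size images per second. A minimal sketch of that conversion, using a fabricated speedtest_info-shaped dict (the field names mirror the ones read above; the values are invented):

# Sketch only: sample data shaped like the dicts Speedtest reads above
# ("speedtest" -> list of per-batch benchmark results).
speedtest_info = {
    "speedtest": [
        {"batch_size": 1, "benchmark": {"total": 25.0}},   # 25 ms per single image
        {"batch_size": 8, "benchmark": {"total": 120.0}},  # 120 ms per 8-image batch
    ]
}

for test in speedtest_info["speedtest"]:
    batch_size = test["batch_size"]
    total_ms = test["benchmark"]["total"]      # wall time for the whole batch
    fps = round(1000 / total_ms * batch_size)  # same formula as fps_table above
    print(f"batch={batch_size}: {total_ms} ms -> {fps} img/s")
# batch=1: 25.0 ms -> 40 img/s
# batch=8: 120.0 ms -> 67 img/s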
supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/vis_metric.py
@@ -0,0 +1,19 @@
+ from typing import List
+
+ from supervisely.nn.benchmark.visualization.evaluation_result import EvalResult
+ from supervisely.nn.benchmark.visualization.widgets import GalleryWidget
+
+
+ class BaseVisMetric:
+
+     def __init__(
+         self,
+         vis_texts,
+         eval_results: List[EvalResult],
+         explore_modal_table: GalleryWidget = None,
+         diff_modal_table: GalleryWidget = None,
+     ) -> None:
+         self.vis_texts = vis_texts
+         self.eval_results = eval_results
+         self.explore_modal_table = explore_modal_table
+         self.diff_modal_table = diff_modal_table
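Every comparison metric in this release (Speedtest, Overview, OutcomeCounts, and the rest) inherits from this small base class, which only stores the shared inputs. A hypothetical minimal subclass, for illustration only (ModelCount and summary are invented names; only the constructor contract comes from the diff above):

# Hypothetical subclass: inherits the stored vis_texts / eval_results /
# modal-table fields and exposes a derived value, as the real metrics do.
class ModelCount(BaseVisMetric):

    @property
    def summary(self) -> str:
        # self.eval_results is the List[EvalResult] stored by BaseVisMetric
        return f"Comparing {len(self.eval_results)} evaluation results"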
supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py
@@ -0,0 +1,298 @@
+ import datetime
+ from pathlib import Path
+
+ import supervisely.nn.benchmark.comparison.detection_visualization.text_templates as vis_texts
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics import (
+     AveragePrecisionByClass,
+     CalibrationScore,
+     ExplorePredictions,
+     LocalizationAccuracyIoU,
+     OutcomeCounts,
+     Overview,
+     PrCurve,
+     PrecisionRecallF1,
+     Speedtest,
+ )
+ from supervisely.nn.benchmark.visualization.renderer import Renderer
+ from supervisely.nn.benchmark.visualization.widgets import (
+     ContainerWidget,
+     GalleryWidget,
+     MarkdownWidget,
+     SidebarWidget,
+ )
+
+
+ class DetectionComparisonVisualizer:
+     def __init__(self, comparison):
+         self.comparison = comparison
+         self.api = comparison.api
+         self.vis_texts = vis_texts
+
+         self._create_widgets()
+         layout = self._create_layout()
+
+         self.renderer = Renderer(layout, str(Path(self.comparison.workdir, "visualizations")))
+
+     def visualize(self):
+         return self.renderer.visualize()
+
+     def upload_results(self, team_id: int, remote_dir: str, progress=None):
+         return self.renderer.upload_results(self.api, team_id, remote_dir, progress)
+
+     def _create_widgets(self):
+         # Modal Gallery
+         self.diff_modal_table = self._create_diff_modal_table()
+         self.explore_modal_table = self._create_explore_modal_table(self.diff_modal_table.id)
+
+         # Notification
+         self.clickable_label = self._create_clickable_label()
+
+         # Speedtest init here for overview
+         speedtest = Speedtest(self.vis_texts, self.comparison.evaluation_results)
+
+         # Overview
+         overview = Overview(self.vis_texts, self.comparison.evaluation_results)
+         self.header = self._create_header()
+         self.overviews = self._create_overviews(overview)
+         self.overview_md = overview.overview_md
+         self.key_metrics_md = self._create_key_metrics()
+         self.key_metrics_table = overview.get_table_widget(
+             latency=speedtest.latency, fps=speedtest.fps
+         )
+         self.overview_chart = overview.chart_widget
+
+         columns_number = len(self.comparison.evaluation_results) + 1  # +1 for GT
+         self.explore_predictions_modal_gallery = self._create_explore_modal_table(columns_number)
+         explore_predictions = ExplorePredictions(
+             self.vis_texts,
+             self.comparison.evaluation_results,
+             explore_modal_table=self.explore_predictions_modal_gallery,
+         )
+         self.explore_predictions_md = explore_predictions.difference_predictions_md
+         self.explore_predictions_gallery = explore_predictions.explore_gallery
+
+         # Outcome Counts
+         outcome_counts = OutcomeCounts(
+             self.vis_texts,
+             self.comparison.evaluation_results,
+             explore_modal_table=self.explore_modal_table,
+         )
+         self.outcome_counts_md = self._create_outcome_counts_md()
+         self.outcome_counts_diff_md = self._create_outcome_counts_diff_md()
+         self.outcome_counts_main = outcome_counts.chart_widget_main
+         self.outcome_counts_comparison = outcome_counts.chart_widget_comparison
+
+         # Precision-Recall Curve
+         pr_curve = PrCurve(self.vis_texts, self.comparison.evaluation_results)
+         self.pr_curve_md = pr_curve.markdown_widget
+         self.pr_curve_collapsed_widgets = pr_curve.collapsed_widget
+         self.pr_curve_table = pr_curve.table_widget
+         self.pr_curve_chart = pr_curve.chart_widget
+
+         # Average Precision by Class
+         avg_prec_by_class = AveragePrecisionByClass(
+             self.vis_texts,
+             self.comparison.evaluation_results,
+             explore_modal_table=self.explore_modal_table,
+         )
+         self.avg_prec_by_class_md = avg_prec_by_class.markdown_widget
+         self.avg_prec_by_class_chart = avg_prec_by_class.chart_widget
+
+         # Precision, Recall, F1
+         precision_recall_f1 = PrecisionRecallF1(
+             self.vis_texts,
+             self.comparison.evaluation_results,
+             explore_modal_table=self.explore_modal_table,
+         )
+         self.precision_recall_f1_md = precision_recall_f1.markdown_widget
+         self.precision_recall_f1_table = precision_recall_f1.table_widget
+         self.precision_recall_f1_chart = precision_recall_f1.chart_main_widget
+         self.precision_per_class_title_md = precision_recall_f1.precision_per_class_title_md
+         self.precision_per_class_chart = precision_recall_f1.chart_precision_per_class_widget
+         self.recall_per_class_title_md = precision_recall_f1.recall_per_class_title_md
+         self.recall_per_class_chart = precision_recall_f1.chart_recall_per_class_widget
+         self.f1_per_class_chart = precision_recall_f1.chart_f1_per_class_widget
+         self.f1_per_class_title_md = precision_recall_f1.f1_per_class_title_md
+
+         # Classification Accuracy
+         # TODO: ???
+
+         # Localization Accuracy (IoU)
+         loc_acc = LocalizationAccuracyIoU(self.vis_texts, self.comparison.evaluation_results)
+         self.loc_acc_header_md = loc_acc.header_md
+         self.loc_acc_iou_distribution_md = loc_acc.iou_distribution_md
+         self.loc_acc_chart = loc_acc.chart
+         self.loc_acc_table = loc_acc.table_widget
+
+         # Calibration Score
+         cal_score = CalibrationScore(self.vis_texts, self.comparison.evaluation_results)
+         self.cal_score_md = cal_score.header_md
+         self.cal_score_md_2 = cal_score.header_md_2
+         self.cal_score_collapse_tip = cal_score.collapse_tip
+         self.cal_score_table = cal_score.table
+         self.cal_score_reliability_diagram_md = cal_score.reliability_diagram_md
+         self.cal_score_reliability_chart = cal_score.reliability_chart
+         self.cal_score_collapse_ece = cal_score.collapse_ece
+         self.cal_score_confidence_score_md = cal_score.confidence_score_md
+         self.cal_score_confidence_chart = cal_score.confidence_chart
+         self.cal_score_confidence_score_md_2 = cal_score.confidence_score_md_2
+         self.cal_score_collapse_conf_score = cal_score.collapse_conf_score
+
+         # SpeedTest
+         self.speedtest_present = False
+         if not speedtest.is_empty():
+             self.speedtest_present = True
+             self.speedtest_md_intro = speedtest.md_intro
+             self.speedtest_intro_table = speedtest.intro_table
+             self.speed_inference_time_md = speedtest.inference_time_md
+             self.speed_inference_time_table = speedtest.inference_time_table
+             self.speed_fps_md = speedtest.fps_md
+             self.speed_fps_table = speedtest.fps_table
+             self.speed_batch_inference_md = speedtest.batch_inference_md
+             self.speed_chart = speedtest.chart
+
+     def _create_layout(self):
+         is_anchors_widgets = [
+             # Overview
+             (0, self.header),
+             (1, self.overview_md),
+             (0, self.overviews),
+             (1, self.key_metrics_md),
+             (0, self.key_metrics_table),
+             (0, self.overview_chart),
+             # Explore Predictions
+             (1, self.explore_predictions_md),
+             (0, self.explore_predictions_gallery),
+             # Outcome Counts
+             (1, self.outcome_counts_md),
+             (0, self.outcome_counts_main),
+             (0, self.outcome_counts_diff_md),
+             (0, self.outcome_counts_comparison),
+             # Precision-Recall Curve
+             (1, self.pr_curve_md),
+             (0, self.pr_curve_collapsed_widgets),
+             (0, self.pr_curve_table),
+             (0, self.pr_curve_chart),
+             # Average Precision by Class
+             (1, self.avg_prec_by_class_md),
+             (0, self.avg_prec_by_class_chart),
+             # Precision, Recall, F1
+             (1, self.precision_recall_f1_md),
+             (0, self.precision_recall_f1_table),
+             (0, self.clickable_label),
+             (0, self.precision_recall_f1_chart),
+             (0, self.precision_per_class_title_md),
+             (0, self.precision_per_class_chart),
+             (0, self.recall_per_class_title_md),
+             (0, self.recall_per_class_chart),
+             (0, self.f1_per_class_title_md),
+             (0, self.f1_per_class_chart),
+             # Classification Accuracy # TODO
+             # Localization Accuracy (IoU)
+             (1, self.loc_acc_header_md),
+             (0, self.loc_acc_table),
+             (0, self.loc_acc_iou_distribution_md),
+             (0, self.loc_acc_chart),
+             # Calibration Score
+             (1, self.cal_score_md),
+             (0, self.cal_score_md_2),
+             (0, self.cal_score_collapse_tip),
+             (0, self.cal_score_table),
+             (1, self.cal_score_reliability_diagram_md),
+             (0, self.cal_score_reliability_chart),
+             (0, self.cal_score_collapse_ece),
+             (1, self.cal_score_confidence_score_md),
+             (0, self.cal_score_confidence_chart),
+             (0, self.cal_score_confidence_score_md_2),
+             (0, self.cal_score_collapse_conf_score),
+         ]
+         if self.speedtest_present:
+             is_anchors_widgets.extend(
+                 [
+                     # SpeedTest
+                     (1, self.speedtest_md_intro),
+                     (0, self.speedtest_intro_table),
+                     (0, self.speed_inference_time_md),
+                     (0, self.speed_inference_time_table),
+                     (0, self.speed_fps_md),
+                     (0, self.speed_fps_table),
+                     (0, self.speed_batch_inference_md),
+                     (0, self.speed_chart),
+                 ]
+             )
+         anchors = []
+         for is_anchor, widget in is_anchors_widgets:
+             if is_anchor:
+                 anchors.append(widget.id)
+
+         sidebar = SidebarWidget(widgets=[i[1] for i in is_anchors_widgets], anchors=anchors)
+         layout = ContainerWidget(
+             widgets=[sidebar, self.explore_modal_table, self.explore_predictions_modal_gallery],
+             name="main_container",
+         )
+         return layout
+
+     def _create_header(self) -> MarkdownWidget:
+         me = self.api.user.get_my_info().login
+         current_date = datetime.datetime.now().strftime("%d %B %Y, %H:%M")
+         header_main_text = " ∣ ".join(  # vs. or | or ∣
+             eval_res.name for eval_res in self.comparison.evaluation_results
+         )
+         header_text = self.vis_texts.markdown_header.format(header_main_text, me, current_date)
+         header = MarkdownWidget("markdown_header", "Header", text=header_text)
+         return header
+
+     def _create_overviews(self, vm: Overview) -> ContainerWidget:
+         grid_cols = 2
+         if len(vm.overview_widgets) > 2:
+             grid_cols = 3
+         if len(vm.overview_widgets) % 4 == 0:
+             grid_cols = 4
+         return ContainerWidget(
+             vm.overview_widgets,
+             name="overview_container",
+             title="Overview",
+             grid=True,
+             grid_cols=grid_cols,
+         )
+
+     def _create_key_metrics(self) -> MarkdownWidget:
+         key_metrics_text = self.vis_texts.markdown_key_metrics.format(
+             self.vis_texts.definitions.average_precision,
+             self.vis_texts.definitions.confidence_threshold,
+             self.vis_texts.definitions.confidence_score,
+         )
+         return MarkdownWidget("markdown_key_metrics", "Key Metrics", text=key_metrics_text)
+
+     def _create_outcome_counts_md(self) -> MarkdownWidget:
+         outcome_counts_text = self.vis_texts.markdown_outcome_counts.format(
+             self.vis_texts.definitions.true_positives,
+             self.vis_texts.definitions.false_positives,
+             self.vis_texts.definitions.false_negatives,
+         )
+         return MarkdownWidget("markdown_outcome_counts", "Outcome Counts", text=outcome_counts_text)
+
+     def _create_outcome_counts_diff_md(self) -> MarkdownWidget:
+         outcome_counts_text = self.vis_texts.markdown_outcome_counts_diff
+         return MarkdownWidget(
+             "markdown_outcome_counts_diff", "Outcome Counts Differences", text=outcome_counts_text
+         )
+
+     def _create_explore_modal_table(self, columns_number=3):
+         # TODO: table for each evaluation?
+         all_predictions_modal_gallery = GalleryWidget(
+             "all_predictions_modal_gallery", is_modal=True, columns_number=columns_number
+         )
+         all_predictions_modal_gallery.set_project_meta(
+             self.comparison.evaluation_results[0].dt_project_meta
+         )
+         return all_predictions_modal_gallery
+
+     def _create_diff_modal_table(self, columns_number=3) -> GalleryWidget:
+         diff_modal_gallery = GalleryWidget(
+             "diff_predictions_modal_gallery", is_modal=True, columns_number=columns_number
+         )
+         return diff_modal_gallery
+
+     def _create_clickable_label(self):
+         return MarkdownWidget("clickable_label", "", text=self.vis_texts.clickable_label)
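The visualizer above follows a build-render-upload flow: widgets are created in __init__, visualize() renders them through Renderer into <workdir>/visualizations, and upload_results() pushes the output to Team Files. A hedged usage sketch; the comparison object is assumed to expose api, workdir, and evaluation_results (the only attributes __init__ reads), and team_id / remote_dir are placeholder values:

# Sketch under assumptions: how this diff's entry points fit together.
# `comparison` is assumed to come from the new model_comparison.py module,
# whose constructor arguments are not shown in this diff.
from supervisely.nn.benchmark.comparison.detection_visualization.visualizer import (
    DetectionComparisonVisualizer,
)

visualizer = DetectionComparisonVisualizer(comparison)  # `comparison` prepared elsewhere
visualizer.visualize()  # renders the report into <comparison.workdir>/visualizations
visualizer.upload_results(team_id=42, remote_dir="/model-benchmark/comparison")  # placeholder values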