supervisely 6.73.214-py3-none-any.whl → 6.73.216-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of supervisely might be problematic.

Files changed (52)
  1. supervisely/app/fastapi/templating.py +1 -1
  2. supervisely/app/widgets/report_thumbnail/report_thumbnail.py +17 -5
  3. supervisely/app/widgets/team_files_selector/team_files_selector.py +3 -0
  4. supervisely/nn/artifacts/__init__.py +1 -0
  5. supervisely/nn/artifacts/rtdetr.py +32 -0
  6. supervisely/nn/benchmark/comparison/__init__.py +0 -0
  7. supervisely/nn/benchmark/comparison/detection_visualization/__init__.py +0 -0
  8. supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +437 -0
  9. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/__init__.py +27 -0
  10. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py +125 -0
  11. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +224 -0
  12. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +112 -0
  13. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +161 -0
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +336 -0
  15. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +249 -0
  16. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +142 -0
  17. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +300 -0
  18. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +308 -0
  19. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/vis_metric.py +19 -0
  20. supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py +298 -0
  21. supervisely/nn/benchmark/comparison/model_comparison.py +84 -0
  22. supervisely/nn/benchmark/evaluation/coco/metric_provider.py +9 -7
  23. supervisely/nn/benchmark/visualization/evaluation_result.py +266 -0
  24. supervisely/nn/benchmark/visualization/renderer.py +100 -0
  25. supervisely/nn/benchmark/visualization/report_template.html +46 -0
  26. supervisely/nn/benchmark/visualization/visualizer.py +1 -1
  27. supervisely/nn/benchmark/visualization/widgets/__init__.py +17 -0
  28. supervisely/nn/benchmark/visualization/widgets/chart/__init__.py +0 -0
  29. supervisely/nn/benchmark/visualization/widgets/chart/chart.py +72 -0
  30. supervisely/nn/benchmark/visualization/widgets/chart/template.html +16 -0
  31. supervisely/nn/benchmark/visualization/widgets/collapse/__init__.py +0 -0
  32. supervisely/nn/benchmark/visualization/widgets/collapse/collapse.py +33 -0
  33. supervisely/nn/benchmark/visualization/widgets/container/__init__.py +0 -0
  34. supervisely/nn/benchmark/visualization/widgets/container/container.py +54 -0
  35. supervisely/nn/benchmark/visualization/widgets/gallery/__init__.py +0 -0
  36. supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +125 -0
  37. supervisely/nn/benchmark/visualization/widgets/gallery/template.html +49 -0
  38. supervisely/nn/benchmark/visualization/widgets/markdown/__init__.py +0 -0
  39. supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +53 -0
  40. supervisely/nn/benchmark/visualization/widgets/notification/__init__.py +0 -0
  41. supervisely/nn/benchmark/visualization/widgets/notification/notification.py +38 -0
  42. supervisely/nn/benchmark/visualization/widgets/sidebar/__init__.py +0 -0
  43. supervisely/nn/benchmark/visualization/widgets/sidebar/sidebar.py +67 -0
  44. supervisely/nn/benchmark/visualization/widgets/table/__init__.py +0 -0
  45. supervisely/nn/benchmark/visualization/widgets/table/table.py +116 -0
  46. supervisely/nn/benchmark/visualization/widgets/widget.py +22 -0
  47. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/METADATA +1 -1
  48. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/RECORD +52 -12
  49. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/LICENSE +0 -0
  50. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/WHEEL +0 -0
  51. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/entry_points.txt +0 -0
  52. {supervisely-6.73.214.dist-info → supervisely-6.73.216.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,84 @@
+import random
+from pathlib import Path
+from typing import List, Optional
+
+from supervisely.api.api import Api
+from supervisely.app.widgets import SlyTqdm
+from supervisely.imaging.color import get_predefined_colors, rgb2hex
+from supervisely.nn.benchmark.comparison.detection_visualization.visualizer import (
+    DetectionComparisonVisualizer,
+)
+from supervisely.nn.benchmark.visualization.evaluation_result import EvalResult
+from supervisely.task.progress import tqdm_sly
+
+
+class ModelComparison:
+
+    def __init__(
+        self,
+        api: Api,
+        remote_eval_dirs: List[str],
+        progress: Optional[SlyTqdm] = None,
+        workdir: Optional[str] = "./benchmark/comparison",
+    ):
+        self.api = api
+        self.progress = progress or tqdm_sly
+        self.workdir = workdir
+        self.remote_eval_dirs = remote_eval_dirs
+        self.evaluation_results: List[EvalResult] = []
+
+        colors = get_predefined_colors(len(remote_eval_dirs) * 5)  # for better visualizations
+        random.shuffle(colors)
+        for i, eval_dir in enumerate(remote_eval_dirs):
+            local_path = str(Path(self.workdir, "eval_data"))
+            eval_result = EvalResult(eval_dir, local_path, self.api, self.progress)
+            self.evaluation_results.append(eval_result)
+            eval_result.color = rgb2hex(colors[i])
+
+        self.task_type = self.evaluation_results[0].inference_info.get("task_type")
+        self._validate_eval_data()
+
+        self.visualizer: DetectionComparisonVisualizer = None
+        self.remote_dir = None
+
+    def _validate_eval_data(self):
+        """
+        Validate the evaluation data before running the comparison.
+        Make sure the benchmarks are done on the same project and datasets.
+        """
+        task_type = None
+        img_names = None
+        cat_names = None
+        for eval_result in self.evaluation_results:
+            next_task_type = eval_result.cv_task
+            if not task_type is None:
+                assert task_type == next_task_type, "Task types are different in the evaluations."
+            task_type = next_task_type
+            next_img_names = set(
+                [img.get("file_name") for img in eval_result.coco_gt.imgs.values()]
+            )
+            if not img_names is None:
+                assert img_names == next_img_names, "Images are different in the evaluations."
+            img_names = next_img_names
+            next_cat_names = set([cat.get("name") for cat in eval_result.coco_gt.cats.values()])
+            if not cat_names is None:
+                assert cat_names == next_cat_names, "Categories are different in the evaluations."
+            cat_names = next_cat_names
+
+    def get_metrics(self):
+        pass
+
+    def visualize(self):
+        if self.visualizer is None:
+            self.visualizer = DetectionComparisonVisualizer(self)
+        self.visualizer.visualize()
+
+    def upload_results(self, team_id: int, remote_dir: str, progress=None) -> str:
+        self.remote_dir = self.visualizer.upload_results(team_id, remote_dir, progress)
+        return self.remote_dir
+
+    def get_report_link(self) -> str:
+        if self.remote_dir is None:
+            raise ValueError("Results are not uploaded yet.")
+        report_link = self.remote_dir.rstrip("/") + "/template.vue"
+        return report_link
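
Note: the snippet below is a minimal usage sketch of the new ModelComparison class, assembled only from the code shown above; the team id and evaluation directories are hypothetical placeholders.

import supervisely as sly
from supervisely.nn.benchmark.comparison.model_comparison import ModelComparison

api = sly.Api.from_env()
comparison = ModelComparison(
    api,
    remote_eval_dirs=[
        "/model-benchmark/project_1/yolov8-s/",   # hypothetical evaluation folders in Team Files
        "/model-benchmark/project_1/rt-detr-l/",
    ],
)
comparison.visualize()                                # builds the comparison report locally
comparison.upload_results(team_id=1, remote_dir="/model-benchmark/comparisons/demo")
print(comparison.get_report_link())                   # ends with /template.vue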
@@ -89,7 +89,9 @@ class MetricProvider:
         self.coco_precision = coco_metrics["precision"]
         self.iouThrs = params["iouThrs"]
         self.recThrs = params["recThrs"]
-        self.iou_threshold = params["evaluation_params"]["iou_threshold"]
+
+        eval_params = params.get("evaluation_params", {})
+        self.iou_threshold = eval_params.get("iou_threshold", 0.5)
         self.iou_threshold_idx = np.searchsorted(self.iouThrs, self.iou_threshold)
 
     def calculate(self):
@@ -136,7 +138,7 @@ class MetricProvider:
         self._scores_tp_and_fp = self.m_full.scores_tp_and_fp()
         self._maximum_calibration_error = self.m_full.maximum_calibration_error()
         self._expected_calibration_error = self.m_full.expected_calibration_error()
-
+
     def json_metrics(self):
         base = self.base_metrics()
         iou_name = int(self.iou_threshold * 100)
@@ -146,8 +148,8 @@ class MetricProvider:
         ap_custom_by_class = dict(zip(self.cat_names, ap_custom_by_class))
         return {
             "mAP": base["mAP"],
-            "AP50": self.coco_metrics["AP50"],
-            "AP75": self.coco_metrics["AP75"],
+            "AP50": self.coco_metrics.get("AP50"),
+            "AP75": self.coco_metrics.get("AP75"),
             f"AP{iou_name}": self.AP_custom(),
             "f1": base["f1"],
             "precision": base["precision"],
@@ -161,7 +163,7 @@ class MetricProvider:
             "AP_by_class": ap_by_class,
             f"AP{iou_name}_by_class": ap_custom_by_class,
         }
-
+
     def metric_table(self):
         table = self.json_metrics()
         iou_name = int(self.iou_threshold * 100)
@@ -190,10 +192,10 @@ class MetricProvider:
         s[s == -1] = np.nan
         ap = np.nanmean(s, axis=0)
         return ap
-
+
     def AP_custom(self):
         return np.nanmean(self.AP_custom_per_class())
-
+
     def base_metrics(self):
         base = self._base_metrics
         calibration_score = 1 - self._expected_calibration_error
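
The MetricProvider change above replaces direct indexing with dict.get lookups, so evaluation data saved without evaluation_params (or without AP50/AP75) no longer raises a KeyError. A small illustration of the new fallback, with an invented params dict:

params = {"iouThrs": [0.5, 0.55, 0.6], "recThrs": [0.0, 0.01]}   # no "evaluation_params" key

# before: params["evaluation_params"]["iou_threshold"]  -> KeyError
eval_params = params.get("evaluation_params", {})
iou_threshold = eval_params.get("iou_threshold", 0.5)            # falls back to 0.5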
@@ -0,0 +1,266 @@
+import pickle
+from pathlib import Path
+from typing import Dict, List, Optional
+
+import pandas as pd
+
+from supervisely.annotation.annotation import ProjectMeta
+from supervisely.api.api import Api
+from supervisely.api.dataset_api import DatasetInfo
+from supervisely.api.project_api import ProjectInfo
+from supervisely.app.widgets import SlyTqdm
+from supervisely.io.env import team_id
+from supervisely.io.fs import dir_empty, mkdir
+from supervisely.io.json import load_json_file
+from supervisely.nn.benchmark.evaluation.coco.metric_provider import MetricProvider
+from supervisely.nn.benchmark.visualization.vis_click_data import ClickData, IdMapper
+from supervisely.sly_logger import logger
+from supervisely.task.progress import tqdm_sly
+
+
+# class ImageComparisonData:
+#     def __init__(
+#         self,
+#         gt_image_info: ImageInfo = None,
+#         pred_image_info: ImageInfo = None,
+#         diff_image_info: ImageInfo = None,
+#         gt_annotation: Annotation = None,
+#         pred_annotation: Annotation = None,
+#         diff_annotation: Annotation = None,
+#     ):
+#         self.gt_image_info = gt_image_info
+#         self.pred_image_info = pred_image_info
+#         self.diff_image_info = diff_image_info
+#         self.gt_annotation = gt_annotation
+#         self.pred_annotation = pred_annotation
+#         self.diff_annotation = diff_annotation
+
+
+class EvalResult:
+
+    def __init__(
+        self,
+        eval_dir: str,
+        workdir: str,
+        api: Api,
+        progress: Optional[SlyTqdm] = None,
+    ):
+        from pycocotools.coco import COCO  # pylint: disable=import-error
+
+        self.eval_dir = eval_dir
+        self.report_path = Path(eval_dir, "visualizations", "template.vue").as_posix()
+        self.workdir = workdir
+        self.api = api
+        self.team_id = team_id()
+        self.local_dir = str(Path(self.workdir, self.eval_dir.lstrip("/")))
+        self.progress = progress or tqdm_sly
+
+        self.coco_gt: COCO = None
+        self.coco_dt: COCO = None
+        self.inference_info: Dict = None
+        self.speedtest_info: Dict = None
+        self.eval_data: Dict = None
+        self.mp: MetricProvider = None
+        self.df_score_profile: pd.DataFrame = None
+        self.dfsp_down: pd.DataFrame = None
+        self.f1_optimal_conf: float = None
+        self.click_data: ClickData = None
+        # self.comparison_data: Dict[int, ImageComparisonData] = {}
+        self.color = None
+
+        self._gt_project_info = None
+        self._gt_project_meta = None
+        self._gt_dataset_infos = None
+        self._dt_project_id = None
+        self._dt_project_meta = None
+
+        self._load_eval_data()
+        self._read_eval_data()
+
+    @property
+    def cv_task(self):
+        return self.inference_info.get("task_type")
+
+    @property
+    def name(self) -> str:
+        model_name = self.inference_info.get("model_name", self.eval_dir)
+        return self.inference_info.get("deploy_params", {}).get("checkpoint_name", model_name)
+
+    @property
+    def gt_project_id(self) -> int:
+        return self.inference_info.get("gt_project_id")
+
+    @property
+    def gt_project_info(self) -> ProjectInfo:
+        if self._gt_project_info is None:
+            gt_project_id = self.inference_info.get("gt_project_id")
+            self._gt_project_info = self.api.project.get_info_by_id(gt_project_id)
+        return self._gt_project_info
+
+    @property
+    def gt_project_meta(self) -> ProjectMeta:
+        if self._gt_project_meta is None:
+            self._gt_project_meta = ProjectMeta.from_json(
+                self.api.project.get_meta(self.gt_project_id)
+            )
+        return self._gt_project_meta
+
+    @property
+    def gt_dataset_ids(self) -> List[int]:
+        return self.inference_info.get("gt_dataset_ids", None)
+
+    @property
+    def gt_dataset_infos(self) -> List[DatasetInfo]:
+        if self._gt_dataset_infos is None:
+            filters = None
+            if self.gt_dataset_ids is not None:
+                filters = [{"field": "id", "operator": "in", "value": self.gt_dataset_ids}]
+            self._gt_dataset_infos = self.api.dataset.get_list(
+                self.gt_project_id,
+                filters=filters,
+                recursive=True,
+            )
+        return self._gt_dataset_infos
+
+    @property
+    def dt_project_id(self):
+        if self._dt_project_id is None:
+            self._dt_project_id = self.inference_info.get("dt_project_id")
+        return self._dt_project_id
+
+    @property
+    def dt_project_meta(self):
+        if self._dt_project_meta is None:
+            self._dt_project_meta = ProjectMeta.from_json(
+                self.api.project.get_meta(self.dt_project_id)
+            )
+        return self._dt_project_meta
+
+    @property
+    def train_info(self):
+        return self.inference_info.get("train_info", None)  # TODO: check
+
+    @property
+    def gt_images_ids(self):
+        return self.inference_info.get("gt_images_ids", None)  # TODO: check
+
+    @property
+    def classes_whitelist(self):
+        return self.inference_info.get("inference_settings", {}).get("classes", [])  # TODO: check
+
+    def _load_eval_data(self):
+        if not dir_empty(self.local_dir):
+            return
+        if not self.api.storage.dir_exists(self.team_id, self.eval_dir):
+            raise ValueError(f"Directory {self.eval_dir} not found in storage.")
+        mkdir(self.local_dir)
+        dir_name = Path(self.eval_dir).name
+        with self.progress(
+            message=f"Downloading evaluation data from {dir_name}",
+            total=self.api.storage.get_directory_size(self.team_id, self.eval_dir),
+            unit="B",
+            unit_scale=True,
+            unit_divisor=1024,
+        ) as pbar:
+            self.api.storage.download_directory(
+                self.team_id, self.eval_dir, self.local_dir, progress_cb=pbar.update
+            )
+
+    # def _load_projects(self):
+    #     projects_dir = Path(self.local_dir, "projects")
+    #     items_total = self.gt_images_ids
+    #     if items_total is None:
+    #         items_total = sum(self.gt_dataset_infos, key=lambda x: x.items_count)
+    #     with self.progress(
+    #         message=f"Downloading GT project {self.gt_project_info.name} and datasets",
+    #         total=items_total,
+    #     ) as pbar:
+    #         download_project(
+    #             self.api,
+    #             self.gt_project_info.id,
+    #             str(projects_dir),
+    #             dataset_ids=self.gt_dataset_ids,
+    #             progress_cb=pbar.update,
+    #         )
+
+    def _read_eval_data(self):
+        from pycocotools.coco import COCO  # pylint: disable=import-error
+
+        gt_path = str(Path(self.local_dir, "evaluation", "cocoGt.json"))
+        dt_path = str(Path(self.local_dir, "evaluation", "cocoDt.json"))
+        coco_gt, coco_dt = COCO(gt_path), COCO(dt_path)
+        self.coco_gt = coco_gt
+        self.coco_dt = coco_dt
+        self.eval_data = pickle.load(
+            open(Path(self.local_dir, "evaluation", "eval_data.pkl"), "rb")
+        )
+        self.inference_info = load_json_file(
+            Path(self.local_dir, "evaluation", "inference_info.json")
+        )
+        speedtest_info_path = Path(self.local_dir, "speedtest", "speedtest.json")
+        if speedtest_info_path.exists():
+            self.speedtest_info = load_json_file(
+                Path(self.local_dir, "speedtest", "speedtest.json")
+            )
+
+        self.mp = MetricProvider(
+            self.eval_data["matches"],
+            self.eval_data["coco_metrics"],
+            self.eval_data["params"],
+            self.coco_gt,
+            self.coco_dt,
+        )
+        self.mp.calculate()
+
+        self.df_score_profile = pd.DataFrame(
+            self.mp.confidence_score_profile(), columns=["scores", "precision", "recall", "f1"]
+        )
+
+        # downsample
+        if len(self.df_score_profile) > 5000:
+            self.dfsp_down = self.df_score_profile.iloc[:: len(self.df_score_profile) // 1000]
+        else:
+            self.dfsp_down = self.df_score_profile
+
+        self.f1_optimal_conf = self.mp.get_f1_optimal_conf()[0]
+        if self.f1_optimal_conf is None:
+            self.f1_optimal_conf = 0.01
+            logger.warning("F1 optimal confidence cannot be calculated. Using 0.01 as default.")
+
+        # Click data
+        gt_id_mapper = IdMapper(self.coco_gt.dataset)
+        dt_id_mapper = IdMapper(self.coco_dt.dataset)
+
+        self.click_data = ClickData(self.mp.m, gt_id_mapper, dt_id_mapper)
+
+    # def _update_comparison_data(
+    #     self,
+    #     gt_image_id: int,
+    #     gt_image_info: ImageInfo = None,
+    #     pred_image_info: ImageInfo = None,
+    #     diff_image_info: ImageInfo = None,
+    #     gt_annotation: Annotation = None,
+    #     pred_annotation: Annotation = None,
+    #     diff_annotation: Annotation = None,
+    # ):
+    #     comparison_data = self.comparison_data.get(gt_image_id, None)
+    #     if comparison_data is None:
+    #         self.comparison_data[gt_image_id] = ImageComparisonData(
+    #             gt_image_info=gt_image_info,
+    #             pred_image_info=pred_image_info,
+    #             diff_image_info=diff_image_info,
+    #             gt_annotation=gt_annotation,
+    #             pred_annotation=pred_annotation,
+    #             diff_annotation=diff_annotation,
+    #         )
+    #     else:
+    #         for attr, value in {
+    #             "gt_image_info": gt_image_info,
+    #             "pred_image_info": pred_image_info,
+    #             "diff_image_info": diff_image_info,
+    #             "gt_annotation": gt_annotation,
+    #             "pred_annotation": pred_annotation,
+    #             "diff_annotation": diff_annotation,
+    #         }.items():
+    #             if value is not None:
+    #                 setattr(comparison_data, attr, value)
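
EvalResult downloads one remote evaluation folder into the local workdir and re-creates the COCO ground truth, predictions, and MetricProvider from it. A standalone sketch, assuming TEAM_ID is set in the environment and the evaluation directory below exists (both are placeholders):

import supervisely as sly
from supervisely.nn.benchmark.visualization.evaluation_result import EvalResult

api = sly.Api.from_env()
result = EvalResult("/model-benchmark/project_1/yolov8-s/", "./benchmark/comparison/eval_data", api)

print(result.name, result.cv_task)       # checkpoint name and task type from inference_info.json
print(result.mp.json_metrics()["mAP"])   # metrics recomputed from eval_data.pkl
print(result.f1_optimal_conf)            # defaults to 0.01 when it cannot be computed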
@@ -0,0 +1,100 @@
+import json
+from pathlib import Path
+from typing import Optional
+
+from jinja2 import Template
+
+from supervisely.api.api import Api
+from supervisely.io.fs import dir_empty, get_directory_size
+from supervisely.nn.benchmark.visualization.widgets import BaseWidget
+from supervisely.task.progress import tqdm_sly
+
+
+class Renderer:
+    """
+    Base class for rendering visualizations of Evaluation Report.
+    """
+
+    def __init__(
+        self,
+        layout: BaseWidget,
+        base_dir: str = "./output",
+        template: str = None,
+    ) -> None:
+        if template is None:
+            template = (
+                Path(__file__).parents[1].joinpath("visualization/report_template.html").read_text()
+            )
+        self.main_template = template
+        self.layout = layout
+        self.base_dir = base_dir
+
+        if Path(base_dir).exists():
+            if not dir_empty(base_dir):
+                raise ValueError(f"Output directory {base_dir} is not empty.")
+
+    @property
+    def _template_data(self):
+        return {"layout": self.layout.to_html()}
+
+    def render(self):
+        return Template(self.main_template).render(self._template_data)
+
+    def get_state(self):
+        return {}
+
+    def save(self) -> None:
+        self.layout.save_data(self.base_dir)
+        state = self.layout.get_state()
+        with open(Path(self.base_dir).joinpath("state.json"), "w") as f:
+            json.dump(state, f)
+        template = self.render()
+        with open(Path(self.base_dir).joinpath("template.vue"), "w") as f:
+            f.write(template)
+        return template
+
+    def visualize(self):
+        return self.save()
+
+    def upload_results(
+        self, api: Api, team_id: int, remote_dir: str, progress: Optional[tqdm_sly] = None
+    ) -> str:
+        if dir_empty(self.base_dir):
+            raise RuntimeError(
+                "No visualizations to upload. You should call visualize method first."
+            )
+        if progress is None:
+            progress = tqdm_sly
+        dir_total = get_directory_size(self.base_dir)
+        dir_name = Path(remote_dir).name
+        with progress(
+            message=f"Uploading visualizations to {dir_name}",
+            total=dir_total,
+            unit="B",
+            unit_scale=True,
+            unit_divisor=1024,
+        ) as pbar:
+            remote_dir = api.file.upload_directory(
+                team_id,
+                self.base_dir,
+                remote_dir,
+                change_name_if_conflict=True,
+                progress_size_cb=pbar,
+            )
+        src = self.save_report_link(api, team_id, remote_dir)
+        api.file.upload(team_id=team_id, src=src, dst=remote_dir.rstrip("/") + "/open.lnk")
+        return remote_dir
+
+    def save_report_link(self, api: Api, team_id: int, remote_dir: str):
+        report_link = self.get_report_link(api, team_id, remote_dir)
+        pth = Path(self.base_dir).joinpath("open.lnk")
+        with open(pth, "w") as f:
+            f.write(report_link)
+        return str(pth)
+
+    def get_report_link(self, api: Api, team_id: int, remote_dir: str):
+        template_path = remote_dir.rstrip("/") + "/" + "template.vue"
+        vue_template_info = api.file.get_info_by_path(team_id, template_path)
+
+        report_link = "/model-benchmark?id=" + str(vue_template_info.id)
+        return report_link
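
Renderer glues a widget layout into report_template.html, writes template.vue plus state.json, and uploads the result to Team Files. A sketch of that flow; the MarkdownWidget constructor is not part of this diff, so its signature here is an assumption:

import supervisely as sly
from supervisely.nn.benchmark.visualization.renderer import Renderer
from supervisely.nn.benchmark.visualization.widgets import MarkdownWidget

api = sly.Api.from_env()
layout = MarkdownWidget("overview", "Overview", text="## Demo report")  # assumed signature

renderer = Renderer(layout, base_dir="./output")
renderer.visualize()                                   # writes ./output/template.vue and state.json
remote_dir = renderer.upload_results(api, team_id=1, remote_dir="/model-benchmark/demo-report")
print(renderer.get_report_link(api, 1, remote_dir))    # /model-benchmark?id=<template.vue file id>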
@@ -0,0 +1,46 @@
+<div class="model-benchmark-body">
+
+<sly-style>
+.model-benchmark-body .sly-markdown-widget .markdown-body { padding: 0;
+font-family: inherit; }
+.model-benchmark-body .sly-markdown-widget .markdown-body h2 {
+font-size: 18px; font-weight: 600; margin-bottom:
+0px; border: 0; }
+.model-benchmark-body .sly-markdown-widget .markdown-body h3 { color:
+#949bab; font-size: 18px; margin-bottom:
+7px; }
+.model-benchmark-body .sly-markdown-widget .markdown-body p {
+margin-bottom: 12px; }
+.model-benchmark-body .el-collapse { margin: 15px 0; box-shadow: 0 1px
+2px rgba(0, 0, 0, 0.1); border-radius:
+7px; width: fit-content; }
+.model-benchmark-body .el-collapse .el-collapse-item__header {
+background: transparent; padding-right: 15px; }
+.model-benchmark-body .model-info-block { display: flex; gap: 10px;
+align-items: center; margin: 0 0 15px;
+color: #778592; font-size: 13px; }
+.model-benchmark-body .model-info-block > div { display: flex; gap: 4px;
+align-items: center; }
+/* , .model-benchmark-body .sly-markdown-widget
+.markdown-body>*:last-child */
+.model-benchmark-body .sly-iw-notification-box
+.notification-box.notification-box-info { width: fit-content; }
+.model-benchmark-body h1 { font-size: 20px; font-weight: bold;
+margin-bottom: 5px; }
+.model-benchmark-body .overview-info-block { background: #f4f7fb; border-radius: 12px;
+padding: 20px; }
+.model-benchmark-body .width-fit-content { width: fit-content;}
+.model-benchmark-body .overview-info-block ul { list-style: none;
+padding: 0; }
+.model-benchmark-body .overview-info-block ul p { padding: 0; }
+.model-benchmark-body .overview-info-block .checkpoint-url { display:
+inline-block; width: 250px; white-space: nowrap; overflow: hidden;
+text-overflow: ellipsis; vertical-align: middle; }
+.model-benchmark-body .sly-sidebar-widget .main-wrapper .sidebar-panel {
+top: 10px; }
+
+</sly-style>
+
+{{ layout }}
+
+</div>
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
-import os
 import json
+import os
 import pickle
 from typing import TYPE_CHECKING, Dict, List, Tuple
 
@@ -0,0 +1,17 @@
+from supervisely.nn.benchmark.visualization.widgets.chart.chart import ChartWidget
+from supervisely.nn.benchmark.visualization.widgets.collapse.collapse import (
+    CollapseWidget,
+)
+from supervisely.nn.benchmark.visualization.widgets.container.container import (
+    ContainerWidget,
+)
+from supervisely.nn.benchmark.visualization.widgets.gallery.gallery import GalleryWidget
+from supervisely.nn.benchmark.visualization.widgets.markdown.markdown import (
+    MarkdownWidget,
+)
+from supervisely.nn.benchmark.visualization.widgets.notification.notification import (
+    NotificationWidget,
+)
+from supervisely.nn.benchmark.visualization.widgets.sidebar.sidebar import SidebarWidget
+from supervisely.nn.benchmark.visualization.widgets.table.table import TableWidget
+from supervisely.nn.benchmark.visualization.widgets.widget import BaseWidget
@@ -0,0 +1,72 @@
+import json
+from pathlib import Path
+from typing import Dict
+
+from jinja2 import Template
+
+from supervisely.io.fs import ensure_base_path
+from supervisely.nn.benchmark.visualization.widgets.widget import BaseWidget
+
+
+class ChartWidget(BaseWidget):
+    def __init__(
+        self,
+        name: str,
+        figure,  # plotly figure
+    ) -> None:
+        super().__init__(name)
+        self.radio_group = "radio_group"  # TODO: fix
+        self.switch_key = "switch_key"  # TODO: fix
+
+        self.figure = figure
+        self.click_data = None
+        self.click_gallery_id = None
+        self.chart_click_extra = None
+
+    def save_data(self, basepath: str) -> None:
+        # init data
+        basepath = basepath.rstrip("/")
+        ensure_base_path(basepath + self.data_source)
+
+        with open(basepath + self.data_source, "w") as f:
+            json.dump(self.get_init_data(), f)
+
+        # click data
+        if self.click_data is not None:
+            ensure_base_path(basepath + self.click_data_source)
+            with open(basepath + self.click_data_source, "w") as f:
+                json.dump(self.click_data, f)
+
+    def set_click_data(
+        self, gallery_id: str, click_data: Dict, chart_click_extra: str = ""
+    ) -> None:
+        self.click_data = click_data
+        self.click_gallery_id = gallery_id
+        self.chart_click_extra = chart_click_extra
+
+    def _get_template_data(self):
+        return {
+            "widget_id": self.id,
+            "radio_group": self.radio_group,
+            "switch_key": self.switch_key,
+            "init_data_source": self.data_source,
+            "click_handled": self.click_data is not None,
+            "chart_click_data_source": self.click_data_source,
+            "gallery_id": self.click_gallery_id,
+            "chart_click_extra": self.chart_click_extra,
+        }
+
+    def to_html(self) -> str:
+        template_str = Path(__file__).parent / "template.html"
+        return Template(template_str.read_text()).render(self._get_template_data())
+
+    def get_init_data(self):
+        return {
+            "selected": None,
+            "galleryContent": "",
+            "dialogVisible": False,
+            "chartContent": json.loads(self.figure.to_json()),
+        }
+
+    def get_state(self) -> Dict:
+        return {}
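
ChartWidget serializes a Plotly figure to JSON next to the report data and renders the Jinja template shown in the next file. A sketch, assuming plotly is installed and that BaseWidget derives data_source/click_data_source from the widget name, as the diff suggests:

import plotly.express as px
from supervisely.nn.benchmark.visualization.widgets import ChartWidget

fig = px.bar(x=["model A", "model B"], y=[0.61, 0.67], title="mAP")
chart = ChartWidget("map_comparison", figure=fig)

chart.save_data("./output")   # dumps the figure JSON under the widget's data_source path
html = chart.to_html()        # renders the <sly-iw-chart> snippet for this widget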
@@ -0,0 +1,16 @@
+<div style="margin-top: 20px; margin-bottom: 20px;">
+  <sly-iw-chart iw-widget-id="{{ widget_id }}" {% if switchable %}
+    v-show="state.{{ radio_group }} === '{{ switch_key }}'" {% endif %} :actions="{
+    'init': {
+      'dataSource': '{{ init_data_source }}',
+    },
+    {% if click_handled %}
+    'chart-click': {
+      'dataSource': '{{ chart_click_data_source }}',
+      {{ chart_click_extra }}
+      'galleryId': '{{ gallery_id }}',
+      'limit': 9
+    },
+    {% endif %}
+  }" :command="command" :data="data" />
+</div>