supervisely-6.73.237-py3-none-any.whl → supervisely-6.73.239-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of supervisely might be problematic.

Files changed (138)
  1. supervisely/annotation/annotation.py +2 -2
  2. supervisely/api/entity_annotation/tag_api.py +11 -4
  3. supervisely/geometry/rectangle.py +7 -8
  4. supervisely/nn/__init__.py +1 -0
  5. supervisely/nn/benchmark/__init__.py +14 -2
  6. supervisely/nn/benchmark/base_benchmark.py +84 -37
  7. supervisely/nn/benchmark/base_evaluator.py +120 -0
  8. supervisely/nn/benchmark/base_visualizer.py +265 -0
  9. supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -5
  10. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +2 -2
  11. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +39 -16
  12. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +1 -1
  13. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +4 -4
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +12 -11
  15. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
  16. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +6 -6
  17. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +3 -3
  18. supervisely/nn/benchmark/{instance_segmentation_benchmark.py → instance_segmentation/benchmark.py} +9 -3
  19. supervisely/nn/benchmark/instance_segmentation/evaluator.py +58 -0
  20. supervisely/nn/benchmark/{visualization/text_templates/instance_segmentation_text.py → instance_segmentation/text_templates.py} +53 -69
  21. supervisely/nn/benchmark/instance_segmentation/visualizer.py +18 -0
  22. supervisely/nn/benchmark/object_detection/__init__.py +0 -0
  23. supervisely/nn/benchmark/object_detection/base_vis_metric.py +51 -0
  24. supervisely/nn/benchmark/{object_detection_benchmark.py → object_detection/benchmark.py} +4 -2
  25. supervisely/nn/benchmark/object_detection/evaluation_params.yaml +2 -0
  26. supervisely/nn/benchmark/{evaluation/object_detection_evaluator.py → object_detection/evaluator.py} +67 -9
  27. supervisely/nn/benchmark/{evaluation/coco → object_detection}/metric_provider.py +13 -14
  28. supervisely/nn/benchmark/{visualization/text_templates/object_detection_text.py → object_detection/text_templates.py} +49 -41
  29. supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py +48 -0
  30. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confidence_distribution.py +20 -24
  31. supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +119 -0
  32. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confusion_matrix.py +34 -22
  33. supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +129 -0
  34. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py +21 -26
  35. supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py +137 -0
  36. supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py +106 -0
  37. supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +136 -0
  38. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py +53 -49
  39. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +188 -0
  40. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +191 -0
  41. supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +116 -0
  42. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +106 -0
  43. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py +49 -0
  44. supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +72 -0
  45. supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +59 -0
  46. supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +71 -0
  47. supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +56 -0
  48. supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py +110 -0
  49. supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py +151 -0
  50. supervisely/nn/benchmark/object_detection/visualizer.py +697 -0
  51. supervisely/nn/benchmark/semantic_segmentation/__init__.py +9 -0
  52. supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py +55 -0
  53. supervisely/nn/benchmark/semantic_segmentation/benchmark.py +32 -0
  54. supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml +0 -0
  55. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +162 -0
  56. supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +153 -0
  57. supervisely/nn/benchmark/semantic_segmentation/text_templates.py +130 -0
  58. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/__init__.py +0 -0
  59. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/acknowledgement.py +15 -0
  60. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py +57 -0
  61. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +92 -0
  62. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py +84 -0
  63. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +101 -0
  64. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py +45 -0
  65. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +60 -0
  66. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py +107 -0
  67. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +112 -0
  68. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py +48 -0
  69. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py +178 -0
  70. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py +21 -0
  71. supervisely/nn/benchmark/semantic_segmentation/visualizer.py +304 -0
  72. supervisely/nn/benchmark/utils/__init__.py +12 -0
  73. supervisely/nn/benchmark/utils/detection/__init__.py +2 -0
  74. supervisely/nn/benchmark/{evaluation/coco → utils/detection}/calculate_metrics.py +6 -4
  75. supervisely/nn/benchmark/utils/detection/metric_provider.py +533 -0
  76. supervisely/nn/benchmark/{coco_utils → utils/detection}/sly2coco.py +4 -4
  77. supervisely/nn/benchmark/{coco_utils/utils.py → utils/detection/utlis.py} +11 -0
  78. supervisely/nn/benchmark/utils/semantic_segmentation/__init__.py +0 -0
  79. supervisely/nn/benchmark/utils/semantic_segmentation/calculate_metrics.py +35 -0
  80. supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +804 -0
  81. supervisely/nn/benchmark/utils/semantic_segmentation/loader.py +65 -0
  82. supervisely/nn/benchmark/utils/semantic_segmentation/utils.py +109 -0
  83. supervisely/nn/benchmark/visualization/evaluation_result.py +17 -3
  84. supervisely/nn/benchmark/visualization/vis_click_data.py +1 -1
  85. supervisely/nn/benchmark/visualization/widgets/__init__.py +3 -0
  86. supervisely/nn/benchmark/visualization/widgets/chart/chart.py +12 -4
  87. supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +35 -8
  88. supervisely/nn/benchmark/visualization/widgets/gallery/template.html +8 -4
  89. supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +1 -1
  90. supervisely/nn/benchmark/visualization/widgets/notification/notification.py +11 -7
  91. supervisely/nn/benchmark/visualization/widgets/radio_group/__init__.py +0 -0
  92. supervisely/nn/benchmark/visualization/widgets/radio_group/radio_group.py +34 -0
  93. supervisely/nn/benchmark/visualization/widgets/table/table.py +9 -3
  94. supervisely/nn/benchmark/visualization/widgets/widget.py +4 -0
  95. supervisely/project/project.py +18 -6
  96. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/METADATA +3 -1
  97. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/RECORD +104 -82
  98. supervisely/nn/benchmark/coco_utils/__init__.py +0 -2
  99. supervisely/nn/benchmark/evaluation/__init__.py +0 -3
  100. supervisely/nn/benchmark/evaluation/base_evaluator.py +0 -64
  101. supervisely/nn/benchmark/evaluation/coco/__init__.py +0 -2
  102. supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +0 -88
  103. supervisely/nn/benchmark/utils.py +0 -13
  104. supervisely/nn/benchmark/visualization/inference_speed/__init__.py +0 -19
  105. supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py +0 -161
  106. supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py +0 -28
  107. supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +0 -141
  108. supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py +0 -63
  109. supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py +0 -23
  110. supervisely/nn/benchmark/visualization/vis_metric_base.py +0 -337
  111. supervisely/nn/benchmark/visualization/vis_metrics/__init__.py +0 -67
  112. supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py +0 -55
  113. supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py +0 -93
  114. supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +0 -144
  115. supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py +0 -115
  116. supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py +0 -86
  117. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py +0 -119
  118. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py +0 -148
  119. supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py +0 -109
  120. supervisely/nn/benchmark/visualization/vis_metrics/overview.py +0 -189
  121. supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py +0 -57
  122. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py +0 -101
  123. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py +0 -46
  124. supervisely/nn/benchmark/visualization/vis_metrics/precision.py +0 -56
  125. supervisely/nn/benchmark/visualization/vis_metrics/recall.py +0 -54
  126. supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py +0 -57
  127. supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py +0 -88
  128. supervisely/nn/benchmark/visualization/vis_metrics/what_is.py +0 -23
  129. supervisely/nn/benchmark/visualization/vis_templates.py +0 -241
  130. supervisely/nn/benchmark/visualization/vis_widgets.py +0 -128
  131. supervisely/nn/benchmark/visualization/visualizer.py +0 -729
  132. /supervisely/nn/benchmark/{visualization/text_templates → instance_segmentation}/__init__.py +0 -0
  133. /supervisely/nn/benchmark/{evaluation/coco → instance_segmentation}/evaluation_params.yaml +0 -0
  134. /supervisely/nn/benchmark/{evaluation/coco → utils/detection}/metrics.py +0 -0
  135. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/LICENSE +0 -0
  136. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/WHEEL +0 -0
  137. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/entry_points.txt +0 -0
  138. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/top_level.txt +0 -0
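
Most of this release restructures supervisely.nn.benchmark into per-task subpackages (object_detection, instance_segmentation, semantic_segmentation) with shared base_evaluator.py and base_visualizer.py modules, replacing the flat evaluation/ and visualization/ modules removed below. A minimal sketch of how import paths move under the new layout, assuming the benchmark classes keep their names across the renames recorded in entries 18, 24, and 26 (the public re-exports actually available depend on the updated supervisely/nn/benchmark/__init__.py):

    # Before (6.73.237): task benchmarks lived in flat modules
    from supervisely.nn.benchmark.object_detection_benchmark import ObjectDetectionBenchmark
    from supervisely.nn.benchmark.instance_segmentation_benchmark import InstanceSegmentationBenchmark

    # After (6.73.239): each CV task has its own subpackage
    from supervisely.nn.benchmark.object_detection.benchmark import ObjectDetectionBenchmark
    from supervisely.nn.benchmark.instance_segmentation.benchmark import InstanceSegmentationBenchmark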
supervisely/nn/benchmark/visualization/visualizer.py (entry 131, deleted)
@@ -1,729 +0,0 @@
- from __future__ import annotations
-
- import json
- import os
- import pickle
- from typing import TYPE_CHECKING, Dict, List, Tuple
-
- import pandas as pd
- from jinja2 import Template
-
- from supervisely import AnyGeometry, Bitmap, Polygon, Rectangle
- from supervisely._utils import batched
- from supervisely.annotation.annotation import Annotation
- from supervisely.annotation.tag import Tag
- from supervisely.annotation.tag_meta import TagApplicableTo, TagMeta, TagValueType
- from supervisely.api.image_api import ImageInfo
- from supervisely.convert.image.coco.coco_helper import HiddenCocoPrints
- from supervisely.io import fs
- from supervisely.io.fs import file_exists, mkdir
- from supervisely.nn.benchmark.cv_tasks import CVTask
- from supervisely.project.project import Dataset, OpenMode, Project
- from supervisely.project.project_meta import ProjectMeta
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.base_benchmark import BaseBenchmark
-
- from supervisely import Label
- from supervisely.nn.benchmark.evaluation.coco.metric_provider import MetricProvider
- from supervisely.nn.benchmark.visualization.inference_speed import SPEEDTEST_METRICS
- from supervisely.nn.benchmark.visualization.text_templates import (
-     inference_speed_text,
-     instance_segmentation_text,
-     object_detection_text,
- )
- from supervisely.nn.benchmark.visualization.vis_click_data import ClickData, IdMapper
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_metrics import ALL_METRICS
- from supervisely.nn.benchmark.visualization.vis_templates import generate_main_template
- from supervisely.nn.benchmark.visualization.vis_widgets import Widget
- from supervisely.project.project_meta import ProjectMeta
- from supervisely.sly_logger import logger
-
-
- class ImageComparisonData:
-     def __init__(
-         self,
-         gt_image_info: ImageInfo = None,
-         pred_image_info: ImageInfo = None,
-         diff_image_info: ImageInfo = None,
-         gt_annotation: Annotation = None,
-         pred_annotation: Annotation = None,
-         diff_annotation: Annotation = None,
-     ):
-         self.gt_image_info = gt_image_info
-         self.pred_image_info = pred_image_info
-         self.diff_image_info = diff_image_info
-         self.gt_annotation = gt_annotation
-         self.pred_annotation = pred_annotation
-         self.diff_annotation = diff_annotation
-
-
- class Visualizer:
-
-     def __init__(self, benchmark: BaseBenchmark) -> None:
-
-         if benchmark.dt_project_info is None:
-             raise RuntimeError(
-                 "The benchmark prediction project was not initialized. Please run evaluation or specify dt_project_info property of benchmark object."
-             )
-
-         eval_dir = benchmark.get_eval_results_dir()
-         assert not fs.dir_empty(
-             eval_dir
-         ), f"The result dir {eval_dir!r} is empty. You should run evaluation before visualizing results."
-
-         self._benchmark = benchmark
-         self._api = benchmark.api
-         self.cv_task = benchmark.cv_task
-         self.hardware = benchmark.hardware
-
-         self.eval_dir = benchmark.get_eval_results_dir()
-         self.layout_dir = benchmark.get_layout_results_dir()
-         self.dt_project_info = benchmark.dt_project_info
-         self.gt_project_info = benchmark.gt_project_info
-         self._benchmark.diff_project_info, existed = self._benchmark._get_or_create_diff_project()
-         self.diff_project_info = benchmark.diff_project_info
-         self.classes_whitelist = benchmark.classes_whitelist
-         self.diff_project_meta = ProjectMeta.from_json(
-             self._api.project.get_meta(self.diff_project_info.id)
-         )
-         self.comparison_data: Dict[int, ImageComparisonData] = {}  # gt_id -> ImageComparisonData
-
-         self.gt_project_meta = self._get_filtered_project_meta(self.gt_project_info.id)
-         self.dt_project_meta = self._get_filtered_project_meta(self.dt_project_info.id)
-         self._docs_link = "https://docs.supervisely.com/neural-networks/model-evaluation-benchmark/"
-
-         self.speedtest = benchmark._speedtest
-         self.inference_speed_text = inference_speed_text
-
-         if benchmark.cv_task == CVTask.OBJECT_DETECTION:
-             self._initialize_loader()
-             self.docs_link = self._docs_link + CVTask.OBJECT_DETECTION.value.replace("_", "-")
-             self.vis_texts = object_detection_text
-         elif benchmark.cv_task == CVTask.INSTANCE_SEGMENTATION:
-             self._initialize_loader()
-             self.docs_link = self._docs_link + CVTask.INSTANCE_SEGMENTATION.value.replace("_", "-")
-             self.vis_texts = instance_segmentation_text
-         else:
-             raise NotImplementedError(f"CV task {benchmark.cv_task} is not supported yet")
-
-         self.pbar = benchmark.pbar
-
-         if not existed:
-             self.update_diff_annotations()
-         else:
-             self._init_comparison_data()
-
-     def _initialize_loader(self):
-         from pycocotools.coco import COCO  # pylint: disable=import-error
-
-         cocoGt_path, cocoDt_path, eval_data_path, inference_info_path = (
-             self.eval_dir + "/cocoGt.json",
-             self.eval_dir + "/cocoDt.json",
-             self.eval_dir + "/eval_data.pkl",
-             self.eval_dir + "/inference_info.json",
-         )
-
-         with open(cocoGt_path, "r") as f:
-             cocoGt_dataset = json.load(f)
-         with open(cocoDt_path, "r") as f:
-             cocoDt_dataset = json.load(f)
-
-         # Remove COCO read logs
-         with HiddenCocoPrints():
-             cocoGt = COCO()
-             cocoGt.dataset = cocoGt_dataset
-             cocoGt.createIndex()
-             cocoDt = cocoGt.loadRes(cocoDt_dataset["annotations"])
-
-         with open(eval_data_path, "rb") as f:
-             eval_data = pickle.load(f)
-
-         inference_info = {}
-         if file_exists(inference_info_path):
-             with open(inference_info_path, "r") as f:
-                 inference_info = json.load(f)
-             self.inference_info = inference_info
-         else:
-             self.inference_info = self._benchmark._eval_inference_info
-
-         self.mp = MetricProvider(
-             eval_data["matches"],
-             eval_data["coco_metrics"],
-             eval_data["params"],
-             cocoGt,
-             cocoDt,
-         )
-         self.mp.calculate()
-         self._dump_key_metrics()
-
-         self.df_score_profile = pd.DataFrame(
-             self.mp.confidence_score_profile(), columns=["scores", "precision", "recall", "f1"]
-         )
-
-         # downsample
-         if len(self.df_score_profile) > 5000:
-             self.dfsp_down = self.df_score_profile.iloc[:: len(self.df_score_profile) // 1000]
-         else:
-             self.dfsp_down = self.df_score_profile
-
-         self.f1_optimal_conf = self.mp.get_f1_optimal_conf()[0]
-         if self.f1_optimal_conf is None:
-             self.f1_optimal_conf = 0.01
-             logger.warn("F1 optimal confidence cannot be calculated. Using 0.01 as default.")
-
-         # Click data
-         gt_id_mapper = IdMapper(cocoGt_dataset)
-         dt_id_mapper = IdMapper(cocoDt_dataset)
-
-         self.click_data = ClickData(self.mp.m, gt_id_mapper, dt_id_mapper)
-         self.base_metrics = self.mp.base_metrics
-
-         self._objects_bindings = []
-
-     def visualize(self):
-         from supervisely.app.widgets import GridGalleryV2
-
-         mkdir(f"{self.layout_dir}/data", remove_content_if_exists=True)
-
-         initialized = [mv(self) for mv in ALL_METRICS]
-         if self.speedtest is not None:
-             if len(self.speedtest["speedtest"]) < 2:
-                 SPEEDTEST_METRICS.pop()
-             initialized = initialized + [mv(self) for mv in SPEEDTEST_METRICS]
-         initialized = [mv for mv in initialized if self.cv_task.value in mv.cv_tasks]
-         with self.pbar(
-             message="Visualizations: Saving visualization files",
-             total=len([w for mv in initialized for w in mv.schema]),
-         ) as p:
-             for mv in initialized:
-                 for widget in mv.schema:
-                     self._write_markdown_files(mv, widget)
-                     self._write_json_files(mv, widget)
-                     p.update(1)
-
-         res = {}
-         gallery = GridGalleryV2(
-             columns_number=3,
-             enable_zoom=False,
-             annotations_opacity=0.4,
-             border_width=4,
-             default_tag_filters=[{"confidence": [self.f1_optimal_conf, 1]}],
-             show_zoom_slider=False,
-         )
-         gallery._update_filters()
-         res.update(gallery.get_json_state())
-
-         self.dt_project_meta = self._get_filtered_project_meta(self.dt_project_info.id)
-         res["projectMeta"] = self.dt_project_meta.to_json()
-         for basename in ["modal_general.json", "modal_general_diff.json"]:
-             local_path = f"{self.layout_dir}/data/{basename}"
-             with open(local_path, "w", encoding="utf-8") as f:
-                 f.write(json.dumps(res))
-             logger.info("Saved: %r", basename)
-
-         self._save_template(initialized)
-
-     def _write_markdown_files(self, metric_visualization: MetricVis, widget: Widget):
-
-         if isinstance(widget, Widget.Markdown):
-             content = metric_visualization.get_md_content(widget)
-             local_path = f"{self.layout_dir}/data/{widget.name}.md"
-             with open(local_path, "w", encoding="utf-8") as f:
-                 f.write(content)
-
-             logger.info("Saved: %r", f"{widget.name}.md")
-
-         if isinstance(widget, Widget.Collapse):
-             for subwidget in widget.schema:
-                 content = metric_visualization.get_md_content(subwidget)
-                 local_path = f"{self.layout_dir}/data/{subwidget.name}.md"
-                 with open(local_path, "w", encoding="utf-8") as f:
-                     f.write(content)
-
-                 logger.info("Saved: %r", f"{subwidget.name}.md")
-
-     def _write_json_files(self, mv: MetricVis, widget: Widget):
-         if isinstance(widget, Widget.Chart):
-             fig = mv.get_figure(widget)
-             if fig is not None:
-                 fig_data = {
-                     "selected": None,
-                     "galleryContent": "",
-                     "dialogVisible": False,
-                     "chartContent": json.loads(fig.to_json()),
-                 }
-                 basename = f"{widget.name}_{mv.name}.json"
-                 local_path = f"{self.layout_dir}/data/{basename}"
-                 with open(local_path, "w", encoding="utf-8") as f:
-                     json.dump(fig_data, f)
-                 logger.info("Saved: %r", basename)
-
-             click_data = mv.get_click_data(widget)
-             if click_data is not None:
-                 basename = f"{widget.name}_{mv.name}_click_data.json"
-                 local_path = f"{self.layout_dir}/data/{basename}"
-                 with open(local_path, "w", encoding="utf-8") as f:
-                     f.write(json.dumps(click_data))
-                 logger.info("Saved: %r", basename)
-
-             # modal_data = mv.get_modal_data(widget)
-             # basename = f"{widget.name}_{mv.name}_modal_data.json"
-             # local_path = f"{self.layout_dir}/data/{basename}"
-             # with open(local_path, "w", encoding="utf-8") as f:
-             #     f.write(json.dumps(modal_data))
-             # logger.info("Saved: %r", basename)
-
-         if isinstance(widget, Widget.Gallery):
-             content = mv.get_gallery(widget)
-             if content is not None:
-                 basename = f"{widget.name}_{mv.name}.json"
-                 local_path = f"{self.layout_dir}/data/{basename}"
-                 with open(local_path, "w", encoding="utf-8") as f:
-                     f.write(json.dumps(content))
-                 logger.info("Saved: %r", basename)
-
-             click_data = mv.get_gallery_click_data(widget)
-             if click_data is not None:
-                 basename = f"{widget.name}_{mv.name}_click_data.json"
-                 local_path = f"{self.layout_dir}/data/{basename}"
-                 with open(local_path, "w", encoding="utf-8") as f:
-                     f.write(json.dumps(click_data))
-                 logger.info("Saved: %r", basename)
-
-             diff_data = mv.get_diff_gallery_data(widget)
-             if diff_data is not None:
-                 basename = f"{widget.name}_{mv.name}_diff_data.json"
-                 local_path = f"{self.layout_dir}/data/{basename}"
-                 with open(local_path, "w", encoding="utf-8") as f:
-                     f.write(json.dumps(diff_data))
-                 logger.info("Saved: %r", basename)
-
-             # modal_data = mv.get_gallery_modal(widget)
-             # basename = f"{widget.name}_{mv.name}_modal_data.json"
-             # local_path = f"{self.layout_dir}/data/{basename}"
-             # with open(local_path, "w", encoding="utf-8") as f:
-             #     f.write(json.dumps(modal_data))
-             # logger.info("Saved: %r", basename)
-
-         if isinstance(widget, Widget.Table):
-             content = mv.get_table(widget)
-             if content is not None:
-                 basename = f"{widget.name}_{mv.name}.json"
-                 local_path = f"{self.layout_dir}/data/{basename}"
-                 with open(local_path, "w", encoding="utf-8") as f:
-                     f.write(json.dumps(content))
-                 logger.info("Saved: %r", basename)
-
-             if mv.clickable:
-                 content = mv.get_table_click_data(widget)
-                 basename = f"{widget.name}_{mv.name}_click_data.json"
-                 local_path = f"{self.layout_dir}/data/{basename}"
-                 with open(local_path, "w", encoding="utf-8") as f:
-                     f.write(json.dumps(content))
-                 logger.info("Saved: %r", basename)
-
-     def _generate_template(self, metric_visualizations: Tuple[MetricVis]) -> str:
-         html_snippets = {}
-         main_template = Template(generate_main_template(metric_visualizations))
-         for mv in metric_visualizations:
-             for widget in mv.schema:
-                 if isinstance(widget, Widget.Notification):
-                     html_snippets.update(mv.get_html_snippets())
-
-             html_snippets.update(mv.get_html_snippets())
-
-         return main_template.render(**html_snippets)
-
-     def _generate_state(self, metric_visualizations: Tuple[MetricVis]) -> dict:
-         res = {}
-         for mv in metric_visualizations:
-             for widget in mv.schema:
-                 if isinstance(widget, Widget.Chart) and mv.switchable:
-                     res[mv.radiogroup_id] = widget.switch_key
-                     break
-         return res
-
-     def _save_template(self, metric_visualizations: Tuple[MetricVis]):
-         local_path = f"{self.layout_dir}/template.vue"
-         with open(local_path, "w", encoding="utf-8") as f:
-             f.write(self._generate_template(metric_visualizations))
-         logger.info("Saved: %r", "template.vue")
-         local_path = f"{self.layout_dir}/state.json"
-         with open(local_path, "w", encoding="utf-8") as f:
-             json.dump(self._generate_state(metric_visualizations), f)
-         logger.info("Saved: %r", "state.json")
-
-     def _dump_key_metrics(self):
-         key_metrics = self.mp.json_metrics()
-         path = os.path.join(self._benchmark.get_base_dir(), "evaluation", "key_metrics.json")
-         with open(path, "w", encoding="utf-8") as f:
-             json.dump(key_metrics, f)
-         return path
-
-     def update_diff_annotations(self):
-         meta = self._update_pred_meta_with_tags(self.dt_project_info.id, self.dt_project_meta)
-         self._update_diff_meta(meta)
-
-         self.dt_project_meta = meta
-         self._add_tags_to_pred_project(self.mp.matches, self.dt_project_info.id)
-         gt_project_path, pred_project_path = self._benchmark._download_projects(save_images=False)
-
-         gt_project = Project(gt_project_path, OpenMode.READ)
-         pred_project = Project(pred_project_path, OpenMode.READ)
-         diff_dataset_id_to_info = {
-             ds.id: ds
-             for ds in self._api.dataset.get_list(self.diff_project_info.id, recursive=True)
-         }
-
-         def _get_full_name(ds_id: int):
-             ds_info = diff_dataset_id_to_info[ds_id]
-             if ds_info.parent_id is None:
-                 return ds_info.name
-             return f"{_get_full_name(ds_info.parent_id)}/{ds_info.name}"
-
-         diff_dataset_name_to_info = {
-             _get_full_name(ds_id): ds_info for ds_id, ds_info in diff_dataset_id_to_info.items()
-         }
-
-         matched_id_map = self._get_matched_id_map()  # dt_id -> gt_id
-         matched_gt_ids = set(matched_id_map.values())
-
-         outcome_tag = meta.get_tag_meta("outcome")
-         conf_meta = meta.get_tag_meta("confidence")
-         if conf_meta is None:
-             conf_meta = meta.get_tag_meta("conf")
-         match_tag = meta.get_tag_meta("matched_gt_id")
-
-         pred_tag_list = []
-         with self.pbar(
-             message="Visualizations: Creating diff_project", total=pred_project.total_items
-         ) as progress:
-             logger.debug(
-                 "Creating diff project data",
-                 extra={
-                     "pred_project": [ds.name for ds in pred_project.datasets],
-                     "gt_project": [ds.name for ds in gt_project.datasets],
-                 },
-             )
-             for pred_dataset in pred_project.datasets:
-                 pred_dataset: Dataset
-                 gt_dataset: Dataset = gt_project.datasets.get(pred_dataset.name)
-                 diff_dataset_info = diff_dataset_name_to_info[pred_dataset.name]
-                 diff_anns = []
-                 gt_image_ids = []
-                 pred_img_ids = []
-                 for item_name in pred_dataset.get_items_names():
-                     gt_image_info = gt_dataset.get_image_info(item_name)
-                     gt_image_ids.append(gt_image_info.id)
-                     pred_image_info = pred_dataset.get_image_info(item_name)
-                     pred_img_ids.append(pred_image_info.id)
-                     gt_ann = gt_dataset.get_ann(item_name, gt_project.meta)
-                     pred_ann = pred_dataset.get_ann(item_name, pred_project.meta)
-                     labels = []
-
-                     # TP and FP
-                     for label in pred_ann.labels:
-                         match_tag_id = matched_id_map.get(label.geometry.sly_id)
-                         value = "TP" if match_tag_id else "FP"
-                         pred_tag_list.append(
-                             {
-                                 "tagId": outcome_tag.sly_id,
-                                 "figureId": label.geometry.sly_id,
-                                 "value": value,
-                             }
-                         )
-                         conf = 1
-                         for tag in label.tags.items():
-                             tag: Tag
-                             if tag.name in ["confidence", "conf"]:
-                                 conf = tag.value
-                                 break
-
-                         if conf < self.f1_optimal_conf:
-                             continue  # do not add labels with low confidence to diff project
-                         if match_tag_id:
-                             continue  # do not add TP labels to diff project
-                         label = label.add_tag(Tag(outcome_tag, value))
-                         label = label.add_tag(Tag(match_tag, int(label.geometry.sly_id)))
-                         labels.append(label)
-
-                     # FN
-                     for label in gt_ann.labels:
-                         if self.classes_whitelist:
-                             if label.obj_class.name not in self.classes_whitelist:
-                                 continue
-                         if label.geometry.sly_id not in matched_gt_ids:
-                             if self._is_label_compatible_to_cv_task(label):
-                                 new_label = label.add_tags(
-                                     [Tag(outcome_tag, "FN"), Tag(conf_meta, 1)]
-                                 )
-                                 labels.append(new_label)
-
-                     diff_ann = Annotation(gt_ann.img_size, labels)
-                     diff_anns.append(diff_ann)
-
-                     # comparison data
-                     self._update_comparison_data(
-                         gt_image_info.id,
-                         gt_image_info=gt_image_info,
-                         pred_image_info=pred_image_info,
-                         gt_annotation=gt_ann,
-                         pred_annotation=pred_ann,
-                         diff_annotation=diff_ann,
-                     )
-
-                 diff_img_infos = self._api.image.copy_batch(diff_dataset_info.id, pred_img_ids)
-                 self._api.annotation.upload_anns(
-                     [img_info.id for img_info in diff_img_infos],
-                     diff_anns,
-                     progress_cb=progress.update,
-                 )
-                 for gt_img_id, diff_img_info in zip(gt_image_ids, diff_img_infos):
-                     self._update_comparison_data(gt_img_id, diff_image_info=diff_img_info)
-
-         self._api.image.tag.add_to_objects(self.dt_project_info.id, pred_tag_list)
-
-     def _init_comparison_data(self):
-         gt_project_path, pred_project_path = self._benchmark._download_projects(save_images=False)
-         gt_project = Project(gt_project_path, OpenMode.READ)
-         pred_project = Project(pred_project_path, OpenMode.READ)
-         diff_dataset_id_to_info = {
-             ds.id: ds
-             for ds in self._api.dataset.get_list(self.diff_project_info.id, recursive=True)
-         }
-
-         def _get_full_name(ds_id: int):
-             ds_info = diff_dataset_id_to_info[ds_id]
-             if ds_info.parent_id is None:
-                 return ds_info.name
-             return f"{_get_full_name(ds_info.parent_id)}/{ds_info.name}"
-
-         diff_dataset_name_to_info = {
-             _get_full_name(ds_id): ds_info for ds_id, ds_info in diff_dataset_id_to_info.items()
-         }
-
-         for pred_dataset in pred_project.datasets:
-             pred_dataset: Dataset
-             gt_dataset: Dataset = gt_project.datasets.get(pred_dataset.name)
-             try:
-                 diff_dataset_info = diff_dataset_name_to_info[pred_dataset.name]
-             except KeyError:
-                 raise RuntimeError(
-                     f"Difference project was not created properly. Dataset {pred_dataset.name} is missing"
-                 )
-
-             for item_names_batch in batched(pred_dataset.get_items_names(), 100):
-                 # diff project may be not created yet
-                 item_names_batch.sort()
-                 try:
-                     diff_img_infos_batch: List[ImageInfo] = sorted(
-                         self._api.image.get_list(
-                             diff_dataset_info.id,
-                             filters=[
-                                 {"field": "name", "operator": "in", "value": item_names_batch}
-                             ],
-                         ),
-                         key=lambda x: x.name,
-                     )
-                     diff_anns_batch_dict = {
-                         ann_info.image_id: Annotation.from_json(
-                             ann_info.annotation, self.diff_project_meta
-                         )
-                         for ann_info in self._api.annotation.download_batch(
-                             diff_dataset_info.id, [img_info.id for img_info in diff_img_infos_batch]
-                         )
-                     }
-                     assert (
-                         len(item_names_batch)
-                         == len(diff_img_infos_batch)
-                         == len(diff_anns_batch_dict)
-                     ), "Some images are missing in the difference project"
-
-                     for item_name, diff_img_info in zip(item_names_batch, diff_img_infos_batch):
-                         assert (
-                             item_name == diff_img_info.name
-                         ), "Image names in difference project and prediction project do not match"
-                         gt_image_info = gt_dataset.get_image_info(item_name)
-                         pred_image_info = pred_dataset.get_image_info(item_name)
-                         gt_ann = gt_dataset.get_ann(item_name, gt_project.meta)
-                         pred_ann = pred_dataset.get_ann(item_name, pred_project.meta)
-                         diff_ann = diff_anns_batch_dict[diff_img_info.id]
-
-                         self._update_comparison_data(
-                             gt_image_info.id,
-                             gt_image_info=gt_image_info,
-                             pred_image_info=pred_image_info,
-                             diff_image_info=diff_img_info,
-                             gt_annotation=gt_ann,
-                             pred_annotation=pred_ann,
-                             diff_annotation=diff_ann,
-                         )
-                 except Exception:
-                     raise RuntimeError("Difference project was not created properly")
-
-     def _update_comparison_data(
-         self,
-         gt_image_id: int,
-         gt_image_info: ImageInfo = None,
-         pred_image_info: ImageInfo = None,
-         diff_image_info: ImageInfo = None,
-         gt_annotation: Annotation = None,
-         pred_annotation: Annotation = None,
-         diff_annotation: Annotation = None,
-     ):
-         comparison_data = self.comparison_data.get(gt_image_id, None)
-         if comparison_data is None:
-             self.comparison_data[gt_image_id] = ImageComparisonData(
-                 gt_image_info=gt_image_info,
-                 pred_image_info=pred_image_info,
-                 diff_image_info=diff_image_info,
-                 gt_annotation=gt_annotation,
-                 pred_annotation=pred_annotation,
-                 diff_annotation=diff_annotation,
-             )
-         else:
-             for attr, value in {
-                 "gt_image_info": gt_image_info,
-                 "pred_image_info": pred_image_info,
-                 "diff_image_info": diff_image_info,
-                 "gt_annotation": gt_annotation,
-                 "pred_annotation": pred_annotation,
-                 "diff_annotation": diff_annotation,
-             }.items():
-                 if value is not None:
-                     setattr(comparison_data, attr, value)
-
-     def _update_pred_meta_with_tags(self, project_id: int, meta: ProjectMeta) -> ProjectMeta:
-         old_meta = meta
-         outcome_tag = TagMeta(
-             "outcome",
-             value_type=TagValueType.ONEOF_STRING,
-             possible_values=["TP", "FP", "FN"],
-             applicable_to=TagApplicableTo.OBJECTS_ONLY,
-         )
-         match_tag = TagMeta(
-             "matched_gt_id",
-             TagValueType.ANY_NUMBER,
-             applicable_to=TagApplicableTo.OBJECTS_ONLY,
-         )
-         iou_tag = TagMeta(
-             "iou",
-             TagValueType.ANY_NUMBER,
-             applicable_to=TagApplicableTo.OBJECTS_ONLY,
-         )
-         confidence_tag = TagMeta(
-             "confidence",
-             value_type=TagValueType.ANY_NUMBER,
-             applicable_to=TagApplicableTo.OBJECTS_ONLY,
-         )
-
-         for tag in [outcome_tag, match_tag, iou_tag]:
-             if meta.get_tag_meta(tag.name) is None:
-                 meta = meta.add_tag_meta(tag)
-
-         if meta.get_tag_meta("confidence") is None and meta.get_tag_meta("conf") is None:
-             meta = meta.add_tag_meta(confidence_tag)
-
-         if old_meta == meta:
-             return meta
-
-         meta = self._api.project.update_meta(project_id, meta)
-         return meta
-
-     def _update_diff_meta(self, meta: ProjectMeta):
-         new_obj_classes = []
-         for obj_class in meta.obj_classes:
-             new_obj_classes.append(obj_class.clone(geometry_type=AnyGeometry))
-         meta = meta.clone(obj_classes=new_obj_classes)
-         self.diff_project_meta = self._api.project.update_meta(self.diff_project_info.id, meta)
-
-     def _update_diff_meta(self, meta: ProjectMeta):
-         new_obj_classes = []
-         for obj_class in meta.obj_classes:
-             new_obj_classes.append(obj_class.clone(geometry_type=AnyGeometry))
-         meta = meta.clone(obj_classes=new_obj_classes)
-         self.diff_project_meta = self._api.project.update_meta(self.diff_project_info.id, meta)
-
-     def _add_tags_to_pred_project(self, matches: list, pred_project_id: int):
-
-         # get tag metas
-         # outcome_tag_meta = self.dt_project_meta.get_tag_meta("outcome")
-         match_tag_meta = self.dt_project_meta.get_tag_meta("matched_gt_id")
-         iou_tag_meta = self.dt_project_meta.get_tag_meta("iou")
-
-         # mappings
-         gt_ann_mapping = self.click_data.gt_id_mapper.map_obj
-         dt_ann_mapping = self.click_data.dt_id_mapper.map_obj
-
-         # add tags to objects
-         logger.info("Adding tags to DT project")
-
-         with self.pbar(
-             message="Visualizations: Adding tags to DT project", total=len(matches)
-         ) as p:
-             for batch in batched(matches, 100):
-                 pred_tag_list = []
-                 for match in batch:
-                     if match["type"] == "TP":
-                         outcome = "TP"
-                         matched_gt_id = gt_ann_mapping[match["gt_id"]]
-                         ann_dt_id = dt_ann_mapping[match["dt_id"]]
-                         iou = match["iou"]
-                         # api.advanced.add_tag_to_object(outcome_tag_meta.sly_id, ann_dt_id, str(outcome))
-                         if matched_gt_id is not None:
-                             pred_tag_list.extend(
-                                 [
-                                     {
-                                         "tagId": match_tag_meta.sly_id,
-                                         "figureId": ann_dt_id,
-                                         "value": int(matched_gt_id),
-                                     },
-                                     {
-                                         "tagId": iou_tag_meta.sly_id,
-                                         "figureId": ann_dt_id,
-                                         "value": float(iou),
-                                     },
-                                 ]
-                             )
-                         else:
-                             continue
-                     elif match["type"] == "FP":
-                         outcome = "FP"
-                         # api.advanced.add_tag_to_object(outcome_tag_meta.sly_id, ann_dt_id, str(outcome))
-                     elif match["type"] == "FN":
-                         outcome = "FN"
-                     else:
-                         raise ValueError(f"Unknown match type: {match['type']}")
-
-                 self._api.image.tag.add_to_objects(pred_project_id, pred_tag_list)
-                 p.update(len(batch))
-
-     def _get_matched_id_map(self):
-         gt_ann_mapping = self.click_data.gt_id_mapper.map_obj
-         dt_ann_mapping = self.click_data.dt_id_mapper.map_obj
-         dtId2matched_gt_id = {}
-         for match in self.mp.matches_filtered:
-             if match["type"] == "TP":
-                 dtId2matched_gt_id[dt_ann_mapping[match["dt_id"]]] = gt_ann_mapping[match["gt_id"]]
-         return dtId2matched_gt_id
-
-     def _is_label_compatible_to_cv_task(self, label: Label):
-         if self.cv_task == CVTask.OBJECT_DETECTION:
-             return isinstance(label.geometry, Rectangle)
-         if self.cv_task == CVTask.INSTANCE_SEGMENTATION:
-             return isinstance(label.geometry, (Bitmap, Polygon))
-         return False
-
-     def _get_filtered_project_meta(self, project_id: int) -> ProjectMeta:
-         meta = self._api.project.get_meta(project_id)
-         meta = ProjectMeta.from_json(meta)
-         remove_classes = []
-         if self.classes_whitelist:
-             for obj_class in meta.obj_classes:
-                 if obj_class.name not in self.classes_whitelist:
-                     remove_classes.append(obj_class.name)
-         if remove_classes:
-             meta = meta.delete_obj_classes(remove_classes)
-         return meta