supervisely-6.73.237-py3-none-any.whl → supervisely-6.73.239-py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of supervisely might be problematic.

Files changed (138)
  1. supervisely/annotation/annotation.py +2 -2
  2. supervisely/api/entity_annotation/tag_api.py +11 -4
  3. supervisely/geometry/rectangle.py +7 -8
  4. supervisely/nn/__init__.py +1 -0
  5. supervisely/nn/benchmark/__init__.py +14 -2
  6. supervisely/nn/benchmark/base_benchmark.py +84 -37
  7. supervisely/nn/benchmark/base_evaluator.py +120 -0
  8. supervisely/nn/benchmark/base_visualizer.py +265 -0
  9. supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -5
  10. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +2 -2
  11. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +39 -16
  12. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +1 -1
  13. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +4 -4
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +12 -11
  15. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
  16. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +6 -6
  17. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +3 -3
  18. supervisely/nn/benchmark/{instance_segmentation_benchmark.py → instance_segmentation/benchmark.py} +9 -3
  19. supervisely/nn/benchmark/instance_segmentation/evaluator.py +58 -0
  20. supervisely/nn/benchmark/{visualization/text_templates/instance_segmentation_text.py → instance_segmentation/text_templates.py} +53 -69
  21. supervisely/nn/benchmark/instance_segmentation/visualizer.py +18 -0
  22. supervisely/nn/benchmark/object_detection/__init__.py +0 -0
  23. supervisely/nn/benchmark/object_detection/base_vis_metric.py +51 -0
  24. supervisely/nn/benchmark/{object_detection_benchmark.py → object_detection/benchmark.py} +4 -2
  25. supervisely/nn/benchmark/object_detection/evaluation_params.yaml +2 -0
  26. supervisely/nn/benchmark/{evaluation/object_detection_evaluator.py → object_detection/evaluator.py} +67 -9
  27. supervisely/nn/benchmark/{evaluation/coco → object_detection}/metric_provider.py +13 -14
  28. supervisely/nn/benchmark/{visualization/text_templates/object_detection_text.py → object_detection/text_templates.py} +49 -41
  29. supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py +48 -0
  30. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confidence_distribution.py +20 -24
  31. supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +119 -0
  32. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confusion_matrix.py +34 -22
  33. supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +129 -0
  34. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py +21 -26
  35. supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py +137 -0
  36. supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py +106 -0
  37. supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +136 -0
  38. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py +53 -49
  39. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +188 -0
  40. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +191 -0
  41. supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +116 -0
  42. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +106 -0
  43. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py +49 -0
  44. supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +72 -0
  45. supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +59 -0
  46. supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +71 -0
  47. supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +56 -0
  48. supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py +110 -0
  49. supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py +151 -0
  50. supervisely/nn/benchmark/object_detection/visualizer.py +697 -0
  51. supervisely/nn/benchmark/semantic_segmentation/__init__.py +9 -0
  52. supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py +55 -0
  53. supervisely/nn/benchmark/semantic_segmentation/benchmark.py +32 -0
  54. supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml +0 -0
  55. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +162 -0
  56. supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +153 -0
  57. supervisely/nn/benchmark/semantic_segmentation/text_templates.py +130 -0
  58. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/__init__.py +0 -0
  59. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/acknowledgement.py +15 -0
  60. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py +57 -0
  61. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +92 -0
  62. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py +84 -0
  63. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +101 -0
  64. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py +45 -0
  65. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +60 -0
  66. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py +107 -0
  67. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +112 -0
  68. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py +48 -0
  69. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py +178 -0
  70. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py +21 -0
  71. supervisely/nn/benchmark/semantic_segmentation/visualizer.py +304 -0
  72. supervisely/nn/benchmark/utils/__init__.py +12 -0
  73. supervisely/nn/benchmark/utils/detection/__init__.py +2 -0
  74. supervisely/nn/benchmark/{evaluation/coco → utils/detection}/calculate_metrics.py +6 -4
  75. supervisely/nn/benchmark/utils/detection/metric_provider.py +533 -0
  76. supervisely/nn/benchmark/{coco_utils → utils/detection}/sly2coco.py +4 -4
  77. supervisely/nn/benchmark/{coco_utils/utils.py → utils/detection/utlis.py} +11 -0
  78. supervisely/nn/benchmark/utils/semantic_segmentation/__init__.py +0 -0
  79. supervisely/nn/benchmark/utils/semantic_segmentation/calculate_metrics.py +35 -0
  80. supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +804 -0
  81. supervisely/nn/benchmark/utils/semantic_segmentation/loader.py +65 -0
  82. supervisely/nn/benchmark/utils/semantic_segmentation/utils.py +109 -0
  83. supervisely/nn/benchmark/visualization/evaluation_result.py +17 -3
  84. supervisely/nn/benchmark/visualization/vis_click_data.py +1 -1
  85. supervisely/nn/benchmark/visualization/widgets/__init__.py +3 -0
  86. supervisely/nn/benchmark/visualization/widgets/chart/chart.py +12 -4
  87. supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +35 -8
  88. supervisely/nn/benchmark/visualization/widgets/gallery/template.html +8 -4
  89. supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +1 -1
  90. supervisely/nn/benchmark/visualization/widgets/notification/notification.py +11 -7
  91. supervisely/nn/benchmark/visualization/widgets/radio_group/__init__.py +0 -0
  92. supervisely/nn/benchmark/visualization/widgets/radio_group/radio_group.py +34 -0
  93. supervisely/nn/benchmark/visualization/widgets/table/table.py +9 -3
  94. supervisely/nn/benchmark/visualization/widgets/widget.py +4 -0
  95. supervisely/project/project.py +18 -6
  96. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/METADATA +3 -1
  97. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/RECORD +104 -82
  98. supervisely/nn/benchmark/coco_utils/__init__.py +0 -2
  99. supervisely/nn/benchmark/evaluation/__init__.py +0 -3
  100. supervisely/nn/benchmark/evaluation/base_evaluator.py +0 -64
  101. supervisely/nn/benchmark/evaluation/coco/__init__.py +0 -2
  102. supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +0 -88
  103. supervisely/nn/benchmark/utils.py +0 -13
  104. supervisely/nn/benchmark/visualization/inference_speed/__init__.py +0 -19
  105. supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py +0 -161
  106. supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py +0 -28
  107. supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +0 -141
  108. supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py +0 -63
  109. supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py +0 -23
  110. supervisely/nn/benchmark/visualization/vis_metric_base.py +0 -337
  111. supervisely/nn/benchmark/visualization/vis_metrics/__init__.py +0 -67
  112. supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py +0 -55
  113. supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py +0 -93
  114. supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +0 -144
  115. supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py +0 -115
  116. supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py +0 -86
  117. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py +0 -119
  118. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py +0 -148
  119. supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py +0 -109
  120. supervisely/nn/benchmark/visualization/vis_metrics/overview.py +0 -189
  121. supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py +0 -57
  122. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py +0 -101
  123. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py +0 -46
  124. supervisely/nn/benchmark/visualization/vis_metrics/precision.py +0 -56
  125. supervisely/nn/benchmark/visualization/vis_metrics/recall.py +0 -54
  126. supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py +0 -57
  127. supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py +0 -88
  128. supervisely/nn/benchmark/visualization/vis_metrics/what_is.py +0 -23
  129. supervisely/nn/benchmark/visualization/vis_templates.py +0 -241
  130. supervisely/nn/benchmark/visualization/vis_widgets.py +0 -128
  131. supervisely/nn/benchmark/visualization/visualizer.py +0 -729
  132. /supervisely/nn/benchmark/{visualization/text_templates → instance_segmentation}/__init__.py +0 -0
  133. /supervisely/nn/benchmark/{evaluation/coco → instance_segmentation}/evaluation_params.yaml +0 -0
  134. /supervisely/nn/benchmark/{evaluation/coco → utils/detection}/metrics.py +0 -0
  135. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/LICENSE +0 -0
  136. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/WHEEL +0 -0
  137. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/entry_points.txt +0 -0
  138. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/top_level.txt +0 -0
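
The bulk of this release is a restructuring of supervisely/nn/benchmark: the shared evaluation/ and visualization/vis_metrics/ modules are split into per-task subpackages (object_detection/, instance_segmentation/, semantic_segmentation/), each carrying its own benchmark.py, evaluator.py, text_templates.py, and vis_metrics/. Code that imported the old flat modules will need updated paths. Below is a minimal compatibility sketch; the module paths are taken from the rename list above, but the symbol name ObjectDetectionBenchmark is an assumption not verified against either wheel:

# Hypothetical shim for code that must run against both wheels.
try:
    # 6.73.239: per-task subpackage layout
    from supervisely.nn.benchmark.object_detection.benchmark import (
        ObjectDetectionBenchmark,  # assumed symbol name
    )
except ImportError:
    # 6.73.237: flat module layout
    from supervisely.nn.benchmark.object_detection_benchmark import (
        ObjectDetectionBenchmark,
    )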

supervisely/nn/benchmark/object_detection/visualizer.py (new file)
@@ -0,0 +1,697 @@
+import random
+from pathlib import Path
+from typing import List
+
+import supervisely.nn.benchmark.object_detection.text_templates as vis_texts
+from supervisely._utils import batched
+from supervisely.annotation.annotation import Annotation
+from supervisely.annotation.label import Label
+from supervisely.annotation.tag import Tag
+from supervisely.annotation.tag_meta import TagApplicableTo, TagMeta, TagValueType
+from supervisely.api.image_api import ImageInfo
+from supervisely.api.module_api import ApiField
+from supervisely.geometry.any_geometry import AnyGeometry
+from supervisely.geometry.bitmap import Bitmap
+from supervisely.geometry.polygon import Polygon
+from supervisely.geometry.rectangle import Rectangle
+from supervisely.nn.benchmark.base_visualizer import BaseVisualizer
+from supervisely.nn.benchmark.cv_tasks import CVTask
+from supervisely.nn.benchmark.object_detection.vis_metrics import (
+    ConfidenceDistribution,
+    ConfidenceScore,
+    ConfusionMatrix,
+    ExplorePredictions,
+    F1ScoreAtDifferentIOU,
+    FrequentlyConfused,
+    IOUDistribution,
+    KeyMetrics,
+    ModelPredictions,
+    OutcomeCounts,
+    Overview,
+    PerClassAvgPrecision,
+    PerClassOutcomeCounts,
+    PRCurve,
+    PRCurveByClass,
+    Precision,
+    Recall,
+    RecallVsPrecision,
+    ReliabilityDiagram,
+    Speedtest,
+)
+from supervisely.nn.benchmark.visualization.widgets import (
+    ContainerWidget,
+    MarkdownWidget,
+    SidebarWidget,
+)
+from supervisely.project.project import Dataset, OpenMode, Project
+from supervisely.project.project_meta import ProjectMeta
+from supervisely.sly_logger import logger
+
+
+class ObjectDetectionVisualizer(BaseVisualizer):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self.vis_texts = vis_texts
+        self._widgets = False
+        self.ann_opacity = 0.4
+
+        diff_project_info, diff_dataset_infos, existed = self._get_or_create_diff_project()
+        self.eval_result.diff_project_info = diff_project_info
+        self.eval_result.diff_dataset_infos = diff_dataset_infos
+        self.eval_result.matched_pair_data = {}
+
+        self.gt_project_path = str(Path(self.workdir).parent / "gt_project")
+        self.pred_project_path = str(Path(self.workdir).parent / "pred_project")
+        if not existed:
+            self.update_diff_annotations()
+        else:
+            self._init_match_data()
+
+        # set filtered project meta
+        self.eval_result.filtered_project_meta = self._get_filtered_project_meta(self.eval_result)
+
+        self._get_sample_data_for_gallery()
+
+    @property
+    def cv_task(self):
+        return CVTask.OBJECT_DETECTION
+
+    def _create_widgets(self):
+        # Modal Gallery
+        self.diff_modal = self._create_diff_modal_table()
+        self.explore_modal = self._create_explore_modal_table(
+            click_gallery_id=self.diff_modal.id, hover_text="Compare with GT"
+        )
+
+        # Notification
+        self.clickable_label = self._create_clickable_label()
+
+        # Overview
+        me = self.api.user.get_my_info()
+        overview = Overview(self.vis_texts, self.eval_result)
+        self.header = overview.get_header(me.login)
+        self.overview_md = overview.md
+
+        # Key Metrics
+        key_metrics = KeyMetrics(self.vis_texts, self.eval_result)
+        self.key_metrics_md = key_metrics.md
+        self.key_metrics_table = key_metrics.table
+        self.overview_chart = key_metrics.chart
+
+        # Explore Predictions
+        explore_predictions = ExplorePredictions(
+            self.vis_texts, self.eval_result, self.explore_modal, self.diff_modal
+        )
+        self.explore_predictions_md = explore_predictions.md
+        self.explore_predictions_gallery = explore_predictions.gallery(opacity=self.ann_opacity)
+
+        # Model Predictions
+        model_predictions = ModelPredictions(self.vis_texts, self.eval_result, self.diff_modal)
+        self.model_predictions_md = model_predictions.md
+        self.model_predictions_table = model_predictions.table
+
+        # Outcome Counts
+        outcome_counts = OutcomeCounts(self.vis_texts, self.eval_result, self.explore_modal)
+        self.outcome_counts_md = outcome_counts.md
+        self.outcome_counts_chart = outcome_counts.chart
+
+        # Recall
+        recall = Recall(self.vis_texts, self.eval_result, self.explore_modal)
+        self.recall_md = recall.md
+        self.recall_notification = recall.notification
+        self.recall_per_class_md = recall.per_class_md
+        self.recall_chart = recall.chart
+
+        # Precision
+        precision = Precision(self.vis_texts, self.eval_result, self.explore_modal)
+        self.precision_md = precision.md
+        self.precision_notification = precision.notification
+        self.precision_per_class_md = precision.per_class_md
+        self.precision_chart = precision.chart
+
+        # RecallVsPrecision
+        recall_vs_precision = RecallVsPrecision(
+            self.vis_texts, self.eval_result, self.explore_modal
+        )
+        self.recall_vs_precision_md = recall_vs_precision.md
+        self.recall_vs_precision_chart = recall_vs_precision.chart
+
+        # PRCurve
+        pr_curve = PRCurve(self.vis_texts, self.eval_result)
+        self.pr_curve_md = pr_curve.md
+        self.pr_curve_notification = pr_curve.notification
+        self.pr_curve_chart = pr_curve.chart
+        self.pr_curve_collapse = pr_curve.collapse
+
+        # PRCurveByClass
+        pr_curve_by_class = PRCurveByClass(self.vis_texts, self.eval_result, self.explore_modal)
+        self.pr_curve_by_class_md = pr_curve_by_class.md
+        self.pr_curve_by_class_chart = pr_curve_by_class.chart
+
+        # ConfusionMatrix
+        confusion_matrix = ConfusionMatrix(self.vis_texts, self.eval_result, self.explore_modal)
+        self.confusion_matrix_md = confusion_matrix.md
+        self.confusion_matrix_chart = confusion_matrix.chart
+
+        # FrequentlyConfused
+        frequently_confused = FrequentlyConfused(
+            self.vis_texts, self.eval_result, self.explore_modal
+        )
+        self.frequently_confused_present = frequently_confused.is_empty is False
+        if self.frequently_confused_present:
+            self.frequently_confused_md = frequently_confused.md
+            self.frequently_confused_chart = frequently_confused.chart
+        else:
+            self.frequently_confused_md = frequently_confused.empty_md
+
+        # IOUDistribution
+        iou_distribution = IOUDistribution(self.vis_texts, self.eval_result)
+        if self.cv_task in [CVTask.INSTANCE_SEGMENTATION, CVTask.SEMANTIC_SEGMENTATION]:
+            iou_distribution.md_title = "Mask Accuracy (IoU)"
+        self.iou_distribution_md = iou_distribution.md
+        self.iou_distribution_md_iou_distribution = iou_distribution.md_iou_distribution
+        self.iou_distribution_notification = iou_distribution.notification
+        self.iou_distribution_chart = iou_distribution.chart
+
+        # ReliabilityDiagram
+        reliability_diagram = ReliabilityDiagram(self.vis_texts, self.eval_result)
+        self.reliability_diagram_md_calibration_score = reliability_diagram.md_calibration_score
+        self.reliability_diagram_collapse_1 = reliability_diagram.collapse_tip
+        self.reliability_diagram_md_calibration_score_2 = reliability_diagram.md_calibration_score_2
+        self.reliability_diagram_md_reliability_diagram = reliability_diagram.md_reliability_diagram
+        self.reliability_diagram_notification = reliability_diagram.notification
+        self.reliability_diagram_chart = reliability_diagram.chart
+        self.reliability_diagram_collapse_2 = reliability_diagram.collapse
+
+        # ConfidenceScore
+        confidence_score = ConfidenceScore(self.vis_texts, self.eval_result)
+        self.confidence_score_md_confidence_score = confidence_score.md_confidence_score
+        self.confidence_score_notification = confidence_score.notification
+        self.confidence_score_chart = confidence_score.chart
+        self.confidence_score_md_confidence_score_2 = confidence_score.md_confidence_score_2
+        self.confidence_score_collapse_conf_score = confidence_score.collapse_conf_score
+        self.confidence_score_md_confidence_score_3 = confidence_score.md_confidence_score_3
+
+        # F1ScoreAtDifferentIOU
+        f1_score_at_different_iou = F1ScoreAtDifferentIOU(self.vis_texts, self.eval_result)
+        self.f1_score_at_different_iou_md = f1_score_at_different_iou.md
+        self.f1_score_at_different_iou_chart = f1_score_at_different_iou.chart
+
+        # ConfidenceDistribution
+        confidence_distribution = ConfidenceDistribution(self.vis_texts, self.eval_result)
+        self.confidence_distribution_md = confidence_distribution.md
+        self.confidence_distribution_chart = confidence_distribution.chart
+
+        # PerClassAvgPrecision
+        per_class_avg_precision = PerClassAvgPrecision(
+            self.vis_texts, self.eval_result, self.explore_modal
+        )
+        self.per_class_avg_precision_md = per_class_avg_precision.md
+        self.per_class_avg_precision_chart = per_class_avg_precision.chart
+
+        # PerClassOutcomeCounts
+        per_class_outcome_counts = PerClassOutcomeCounts(
+            self.vis_texts, self.eval_result, self.explore_modal
+        )
+        self.per_class_outcome_counts_md = per_class_outcome_counts.md
+        self.per_class_outcome_counts_md_2 = per_class_outcome_counts.md_2
+        self.per_class_outcome_counts_collapse = per_class_outcome_counts.collapse
+        self.per_class_outcome_counts_chart = per_class_outcome_counts.chart
+
+        # Speedtest init here for overview
+        speedtest = Speedtest(self.vis_texts, self.eval_result)
+        self.speedtest_present = False
+        self.speedtest_batch_sizes_cnt = speedtest.num_batche_sizes
+        if not speedtest.is_empty():
+            self.speedtest_present = True
+            self.speedtest_md_intro = speedtest.intro_md
+            self.speedtest_table_md = speedtest.table_md
+            self.speedtest_table = speedtest.table
+            if self.speedtest_batch_sizes_cnt > 1:
+                self.speedtest_chart_md = speedtest.chart_md
+                self.speedtest_chart = speedtest.chart
+
+        self._widgets = True
+
+    def _create_layout(self):
+        if not self._widgets:
+            self._create_widgets()
+
+        is_anchors_widgets = [
+            # Overview
+            (0, self.header),
+            (1, self.overview_md),
+            # KeyMetrics
+            (1, self.key_metrics_md),
+            (0, self.key_metrics_table),
+            (0, self.overview_chart),
+            # ExplorePredictions
+            (1, self.explore_predictions_md),
+            (0, self.explore_predictions_gallery),
+            # ModelPredictions
+            (1, self.model_predictions_md),
+            (0, self.model_predictions_table),
+            # OutcomeCounts
+            (1, self.outcome_counts_md),
+            (0, self.clickable_label),
+            (0, self.outcome_counts_chart),
+            # Recall
+            (1, self.recall_md),
+            (0, self.recall_notification),
+            (0, self.recall_per_class_md),
+            (0, self.clickable_label),
+            (0, self.recall_chart),
+            # Precision
+            (1, self.precision_md),
+            (0, self.precision_notification),
+            (0, self.precision_per_class_md),
+            (0, self.clickable_label),
+            (0, self.precision_chart),
+            # RecallVsPrecision
+            (1, self.recall_vs_precision_md),
+            (0, self.clickable_label),
+            (0, self.recall_vs_precision_chart),
+            # PRCurve
+            (1, self.pr_curve_md),
+            (0, self.pr_curve_notification),
+            (0, self.pr_curve_chart),
+            (0, self.pr_curve_collapse),
+            # PRCurveByClass
+            (0, self.pr_curve_by_class_md),
+            (0, self.clickable_label),
+            (0, self.pr_curve_by_class_chart),
+            # ConfusionMatrix
+            (1, self.confusion_matrix_md),
+            (0, self.clickable_label),
+            (0, self.confusion_matrix_chart),
+            # FrequentlyConfused
+            (1, self.frequently_confused_md),
+        ]
+        if self.frequently_confused_present:
+            is_anchors_widgets.append((0, self.clickable_label))
+            is_anchors_widgets.append((0, self.frequently_confused_chart))
+
+        is_anchors_widgets.extend(
+            [
+                # IOUDistribution
+                (1, self.iou_distribution_md),
+                (0, self.iou_distribution_md_iou_distribution),
+                (0, self.iou_distribution_notification),
+                (0, self.iou_distribution_chart),
+                # ReliabilityDiagram
+                (1, self.reliability_diagram_md_calibration_score),
+                (0, self.reliability_diagram_collapse_1),
+                (0, self.reliability_diagram_md_calibration_score_2),
+                (1, self.reliability_diagram_md_reliability_diagram),
+                (0, self.reliability_diagram_notification),
+                (0, self.reliability_diagram_chart),
+                (0, self.reliability_diagram_collapse_2),
+                # ConfidenceScore
+                (1, self.confidence_score_md_confidence_score),
+                (0, self.confidence_score_notification),
+                (0, self.confidence_score_chart),
+                (0, self.confidence_score_md_confidence_score_2),
+                (0, self.confidence_score_collapse_conf_score),
+                (0, self.confidence_score_md_confidence_score_3),
+                # F1ScoreAtDifferentIOU
+                (1, self.f1_score_at_different_iou_md),
+                (0, self.f1_score_at_different_iou_chart),
+                # ConfidenceDistribution
+                (1, self.confidence_distribution_md),
+                (0, self.confidence_distribution_chart),
+                # PerClassAvgPrecision
+                (1, self.per_class_avg_precision_md),
+                (0, self.clickable_label),
+                (0, self.per_class_avg_precision_chart),
+                # PerClassOutcomeCounts
+                (1, self.per_class_outcome_counts_md),
+                (0, self.per_class_outcome_counts_md_2),
+                (0, self.per_class_outcome_counts_collapse),
+                (0, self.clickable_label),
+                (0, self.per_class_outcome_counts_chart),
+            ]
+        )
+
+        if self.speedtest_present:
+            # SpeedTest
+            is_anchors_widgets.append((1, self.speedtest_md_intro))
+            is_anchors_widgets.append((0, self.speedtest_table_md))
+            is_anchors_widgets.append((0, self.speedtest_table))
+            if self.speedtest_batch_sizes_cnt > 1:
+                is_anchors_widgets.append((0, self.speedtest_chart_md))
+                is_anchors_widgets.append((0, self.speedtest_chart))
+        anchors = []
+        for is_anchor, widget in is_anchors_widgets:
+            if is_anchor:
+                anchors.append(widget.id)
+
+        sidebar = SidebarWidget(widgets=[i[1] for i in is_anchors_widgets], anchors=anchors)
+        layout = ContainerWidget(
+            widgets=[sidebar, self.explore_modal, self.diff_modal],
+            name="main_container",
+        )
+        return layout
+
+    def _create_clickable_label(self):
+        return MarkdownWidget(name="clickable_label", title="", text=self.vis_texts.clickable_label)
+
+    def update_diff_annotations(self):
+        pred_project_id = self.eval_result.pred_project_id
+        pred_project_meta = self.eval_result.pred_project_meta
+        meta = self._update_pred_meta_with_tags(pred_project_id, pred_project_meta)
+        self.eval_result.pred_project_meta = meta
+
+        self._update_diff_meta(meta)
+
+        self._add_tags_to_pred_project(
+            self.eval_result.mp.matches, self.eval_result.pred_project_id
+        )
+
+        gt_project = Project(self.gt_project_path, OpenMode.READ)
+        pred_project = Project(self.pred_project_path, OpenMode.READ)
+        diff_dataset_id_map = {ds.id: ds for ds in self.eval_result.diff_dataset_infos}
+        logger.info(f"Diff datasets names: {[ds.name for ds in diff_dataset_id_map.values()]}")
+
+        def _get_full_name(ds_id: int):
+            ds_info = diff_dataset_id_map[ds_id]
+            if ds_info.parent_id is None:
+                return ds_info.name
+            return f"{_get_full_name(ds_info.parent_id)}/{ds_info.name}"
+
+        diff_dataset_name_map = {_get_full_name(i): ds for i, ds in diff_dataset_id_map.items()}
+
+        matched_id_map = self._get_matched_id_map()  # dt_id -> gt_id
+        matched_gt_ids = set(matched_id_map.values())
+
+        outcome_tag = meta.get_tag_meta("outcome")
+        conf_meta = meta.get_tag_meta("confidence")
+        if conf_meta is None:
+            conf_meta = meta.get_tag_meta("conf")
+        match_tag = meta.get_tag_meta("matched_gt_id")
+
+        pred_tag_list = []
+        with self.pbar(
+            message="Visualizations: Creating diff_project", total=pred_project.total_items
+        ) as progress:
+            logger.debug(
+                "Creating diff project data",
+                extra={
+                    "pred_project": [ds.name for ds in pred_project.datasets],
+                    "gt_project": [ds.name for ds in gt_project.datasets],
+                },
+            )
+            for pred_dataset in pred_project.datasets:
+                pred_dataset: Dataset
+                gt_dataset: Dataset = gt_project.datasets.get(pred_dataset.name)
+                diff_dataset_info = diff_dataset_name_map[pred_dataset.name]
+                for batch_names in batched(pred_dataset.get_items_names(), 100):
+                    diff_anns = []
+                    gt_image_ids = []
+                    pred_img_ids = []
+                    for item_name in batch_names:
+                        gt_image_info = gt_dataset.get_image_info(item_name)
+                        gt_image_ids.append(gt_image_info.id)
+                        pred_image_info = pred_dataset.get_image_info(item_name)
+                        pred_img_ids.append(pred_image_info.id)
+                        gt_ann = gt_dataset.get_ann(item_name, gt_project.meta)
+                        pred_ann = pred_dataset.get_ann(item_name, pred_project.meta)
+                        labels = []
+
+                        # TP and FP
+                        for label in pred_ann.labels:
+                            match_tag_id = matched_id_map.get(label.geometry.sly_id)
+                            value = "TP" if match_tag_id else "FP"
+                            pred_tag_list.append(
+                                {
+                                    "tagId": outcome_tag.sly_id,
+                                    "figureId": label.geometry.sly_id,
+                                    "value": value,
+                                }
+                            )
+                            conf = 1
+                            for tag in label.tags.items():
+                                tag: Tag
+                                if tag.name in ["confidence", "conf"]:
+                                    conf = tag.value
+                                    break
+
+                            if conf < self.eval_result.mp.f1_optimal_conf:
+                                continue  # do not add labels with low confidence to diff project
+                            if match_tag_id:
+                                continue  # do not add TP labels to diff project
+                            label = label.add_tag(Tag(outcome_tag, value))
+                            label = label.add_tag(Tag(match_tag, int(label.geometry.sly_id)))
+                            labels.append(label)
+
+                        # FN
+                        for label in gt_ann.labels:
+                            if self.eval_result.classes_whitelist:
+                                if label.obj_class.name not in self.eval_result.classes_whitelist:
+                                    continue
+                            if label.geometry.sly_id not in matched_gt_ids:
+                                if self._is_label_compatible_to_cv_task(label):
+                                    new_label = label.add_tags(
+                                        [Tag(outcome_tag, "FN"), Tag(conf_meta, 1)]
+                                    )
+                                    labels.append(new_label)
+
+                        diff_ann = Annotation(gt_ann.img_size, labels)
+                        diff_anns.append(diff_ann)
+
+                        # comparison data
+                        self._update_match_data(
+                            gt_image_info.id,
+                            gt_image_info=gt_image_info,
+                            pred_image_info=pred_image_info,
+                            gt_annotation=gt_ann,
+                            pred_annotation=pred_ann,
+                            diff_annotation=diff_ann,
+                        )
+
+                    diff_img_infos = self.api.image.copy_batch(diff_dataset_info.id, pred_img_ids)
+                    ids = [img.id for img in diff_img_infos]
+                    self.api.annotation.upload_anns(ids, diff_anns, progress_cb=progress.update)
+                    for gt_img_id, diff_img_info in zip(gt_image_ids, diff_img_infos):
+                        self._update_match_data(gt_img_id, diff_image_info=diff_img_info)
+
+        with self.pbar(
+            message="Visualizations: Append tags to predictions", total=len(pred_tag_list)
+        ) as p:
+            self.api.image.tag.add_to_objects(
+                self.eval_result.pred_project_id, pred_tag_list, progress=p
+            )
+
+    def _init_match_data(self):
+        gt_project = Project(self.gt_project_path, OpenMode.READ)
+        pred_project = Project(self.pred_project_path, OpenMode.READ)
+        diff_dataset_id_map = {ds.id: ds for ds in self.eval_result.diff_dataset_infos}
+        logger.info(f"Diff datasets names: {[ds.name for ds in diff_dataset_id_map.values()]}")
+
+        def _get_full_name(ds_id: int):
+            ds_info = diff_dataset_id_map[ds_id]
+            if ds_info.parent_id is None:
+                return ds_info.name
+            return f"{_get_full_name(ds_info.parent_id)}/{ds_info.name}"
+
+        diff_dataset_name_map = {_get_full_name(i): ds for i, ds in diff_dataset_id_map.items()}
+
+        meta_json = self.api.project.get_meta(self.eval_result.diff_project_info.id)
+        self.eval_result.diff_project_meta = ProjectMeta.from_json(meta_json)
+
+        with self.pbar(
+            message="Visualizations: Initializing match data", total=pred_project.total_items
+        ) as p:
+            for pred_dataset in pred_project.datasets:
+                pred_dataset: Dataset
+                gt_dataset: Dataset = gt_project.datasets.get(pred_dataset.name)
+                try:
+                    diff_dataset_info = diff_dataset_name_map[pred_dataset.name]
+                except KeyError:
+                    raise RuntimeError(
+                        f"Difference project was not created properly. Dataset {pred_dataset.name} is missing"
+                    )
+
+                for item_names_batch in batched(pred_dataset.get_items_names(), 50):
+                    # diff project may be not created yet
+                    item_names_batch.sort()
+                    try:
+                        diff_img_infos_batch: List[ImageInfo] = sorted(
+                            self.api.image.get_list(
+                                diff_dataset_info.id,
+                                filters=[
+                                    {
+                                        ApiField.FIELD: ApiField.NAME,
+                                        ApiField.OPERATOR: "in",
+                                        ApiField.VALUE: item_names_batch,
+                                    }
+                                ],
+                                force_metadata_for_links=False,
+                            ),
+                            key=lambda x: x.name,
+                        )
+                        diff_anns_batch_dict = {
+                            ann_info.image_id: Annotation.from_json(
+                                ann_info.annotation, self.eval_result.diff_project_meta
+                            )
+                            for ann_info in self.api.annotation.download_batch(
+                                diff_dataset_info.id,
+                                [img_info.id for img_info in diff_img_infos_batch],
+                                force_metadata_for_links=False,
+                            )
+                        }
+                        assert (
+                            len(item_names_batch)
+                            == len(diff_img_infos_batch)
+                            == len(diff_anns_batch_dict)
+                        ), "Some images are missing in the difference project"
+
+                        for item_name, diff_img_info in zip(item_names_batch, diff_img_infos_batch):
+                            assert (
+                                item_name == diff_img_info.name
+                            ), "Image names in difference project and prediction project do not match"
+                            gt_image_info = gt_dataset.get_image_info(item_name)
+                            pred_image_info = pred_dataset.get_image_info(item_name)
+                            gt_ann = gt_dataset.get_ann(item_name, gt_project.meta)
+                            pred_ann = pred_dataset.get_ann(item_name, pred_project.meta)
+                            diff_ann = diff_anns_batch_dict[diff_img_info.id]
+
+                            self._update_match_data(
+                                gt_image_info.id,
+                                gt_image_info=gt_image_info,
+                                pred_image_info=pred_image_info,
+                                diff_image_info=diff_img_info,
+                                gt_annotation=gt_ann,
+                                pred_annotation=pred_ann,
+                                diff_annotation=diff_ann,
+                            )
+
+                        p.update(len(item_names_batch))
+                    except Exception:
+                        raise RuntimeError("Difference project was not created properly")
+
+    def _update_pred_meta_with_tags(self, project_id: int, meta: ProjectMeta) -> ProjectMeta:
+        old_meta = meta
+        outcome_tag = TagMeta(
+            "outcome",
+            value_type=TagValueType.ONEOF_STRING,
+            possible_values=["TP", "FP", "FN"],
+            applicable_to=TagApplicableTo.OBJECTS_ONLY,
+        )
+        match_tag = TagMeta(
+            "matched_gt_id",
+            TagValueType.ANY_NUMBER,
+            applicable_to=TagApplicableTo.OBJECTS_ONLY,
+        )
+        iou_tag = TagMeta(
+            "iou",
+            TagValueType.ANY_NUMBER,
+            applicable_to=TagApplicableTo.OBJECTS_ONLY,
+        )
+        confidence_tag = TagMeta(
+            "confidence",
+            value_type=TagValueType.ANY_NUMBER,
+            applicable_to=TagApplicableTo.OBJECTS_ONLY,
+        )
+
+        for tag in [outcome_tag, match_tag, iou_tag]:
+            if meta.get_tag_meta(tag.name) is None:
+                meta = meta.add_tag_meta(tag)
+
+        if meta.get_tag_meta("confidence") is None and meta.get_tag_meta("conf") is None:
+            meta = meta.add_tag_meta(confidence_tag)
+
+        if old_meta == meta:
+            return meta
+
+        meta = self.api.project.update_meta(project_id, meta)
+        return meta
+
+    def _update_diff_meta(self, meta: ProjectMeta):
+        new_obj_classes = []
+        for obj_class in meta.obj_classes:
+            new_obj_classes.append(obj_class.clone(geometry_type=AnyGeometry))
+        meta = meta.clone(obj_classes=new_obj_classes)
+        self.eval_result.diff_project_meta = self.api.project.update_meta(
+            self.eval_result.diff_project_info.id, meta
+        )
+
+    def _add_tags_to_pred_project(self, matches: list, pred_project_id: int):
+
+        # get tag metas
+        # outcome_tag_meta = self.dt_project_meta.get_tag_meta("outcome")
+        match_tag_meta = self.eval_result.pred_project_meta.get_tag_meta("matched_gt_id")
+        iou_tag_meta = self.eval_result.pred_project_meta.get_tag_meta("iou")
+
+        # mappings
+        gt_ann_mapping = self.eval_result.click_data.gt_id_mapper.map_obj
+        dt_ann_mapping = self.eval_result.click_data.dt_id_mapper.map_obj
+
+        # add tags to objects
+        logger.info("Adding tags to DT project")
+
+        with self.pbar(
+            message="Visualizations: Adding tags to predictions", total=len(matches)
+        ) as p:
+            for batch in batched(matches, 100):
+                pred_tag_list = []
+                for match in batch:
+                    if match["type"] == "TP":
+                        outcome = "TP"
+                        matched_gt_id = gt_ann_mapping[match["gt_id"]]
+                        ann_dt_id = dt_ann_mapping[match["dt_id"]]
+                        iou = match["iou"]
+                        # api.advanced.add_tag_to_object(outcome_tag_meta.sly_id, ann_dt_id, str(outcome))
+                        if matched_gt_id is not None:
+                            pred_tag_list.extend(
+                                [
+                                    {
+                                        "tagId": match_tag_meta.sly_id,
+                                        "figureId": ann_dt_id,
+                                        "value": int(matched_gt_id),
+                                    },
+                                    {
+                                        "tagId": iou_tag_meta.sly_id,
+                                        "figureId": ann_dt_id,
+                                        "value": float(iou),
+                                    },
+                                ]
+                            )
+                        else:
+                            continue
+                    elif match["type"] == "FP":
+                        outcome = "FP"
+                        # api.advanced.add_tag_to_object(outcome_tag_meta.sly_id, ann_dt_id, str(outcome))
+                    elif match["type"] == "FN":
+                        outcome = "FN"
+                    else:
+                        raise ValueError(f"Unknown match type: {match['type']}")
+
+                self.api.image.tag.add_to_objects(pred_project_id, pred_tag_list)
+                p.update(len(batch))
+
+    def _get_matched_id_map(self):
+        gt_ann_mapping = self.eval_result.click_data.gt_id_mapper.map_obj
+        dt_ann_mapping = self.eval_result.click_data.dt_id_mapper.map_obj
+        dtId2matched_gt_id = {}
+        for match in self.eval_result.mp.matches_filtered:
+            if match["type"] == "TP":
+                dtId2matched_gt_id[dt_ann_mapping[match["dt_id"]]] = gt_ann_mapping[match["gt_id"]]
+        return dtId2matched_gt_id
+
+    def _is_label_compatible_to_cv_task(self, label: Label):
+        if self.cv_task == CVTask.OBJECT_DETECTION:
+            return isinstance(label.geometry, Rectangle)
+        elif self.cv_task == CVTask.INSTANCE_SEGMENTATION:
+            return isinstance(label.geometry, (Bitmap, Polygon))
+        elif self.cv_task == CVTask.SEMANTIC_SEGMENTATION:
+            return isinstance(label.geometry, Bitmap)
+        return False
+
+    def _get_sample_data_for_gallery(self):
+        # get sample images with annotations for visualization (Prediction project)
+        pred_ds = random.choice(self.eval_result.pred_dataset_infos)
+        self.eval_result.sample_images = self.api.image.get_list(pred_ds.id, limit=9)
+        image_ids = [x.id for x in self.eval_result.sample_images]
+        self.eval_result.sample_anns = self.api.annotation.download_batch(pred_ds.id, image_ids)
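
The heart of update_diff_annotations above is the outcome split: a prediction whose figure id appears in the matched-id map is a TP, an unmatched prediction is an FP, and an unmatched ground-truth label becomes an FN; only FP and FN labels (above the F1-optimal confidence threshold) are copied into the difference project. A standalone sketch of that classification, assuming the matches records follow the {"type", "gt_id", "dt_id", "iou"} shape used in _add_tags_to_pred_project:

# Self-contained sketch of the TP/FP/FN split performed above; the record
# shape of `matches` is an assumption mirrored from _add_tags_to_pred_project.
from typing import Dict, List


def split_outcomes(matches: List[dict], pred_ids: List[int], gt_ids: List[int]) -> Dict[str, list]:
    # dt_id -> gt_id for true positives, as in _get_matched_id_map
    matched = {m["dt_id"]: m["gt_id"] for m in matches if m["type"] == "TP"}
    matched_gt = set(matched.values())
    return {
        "TP": [i for i in pred_ids if i in matched],  # matched predictions
        "FP": [i for i in pred_ids if i not in matched],  # unmatched predictions
        "FN": [i for i in gt_ids if i not in matched_gt],  # unmatched ground truth
    }


# Example: two predictions, two GT objects, one match.
print(split_outcomes([{"type": "TP", "dt_id": 10, "gt_id": 1, "iou": 0.8}], [10, 11], [1, 2]))
# -> {'TP': [10], 'FP': [11], 'FN': [2]}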