supervisely-6.73.237-py3-none-any.whl → supervisely-6.73.239-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of supervisely might be problematic.

Files changed (138)
  1. supervisely/annotation/annotation.py +2 -2
  2. supervisely/api/entity_annotation/tag_api.py +11 -4
  3. supervisely/geometry/rectangle.py +7 -8
  4. supervisely/nn/__init__.py +1 -0
  5. supervisely/nn/benchmark/__init__.py +14 -2
  6. supervisely/nn/benchmark/base_benchmark.py +84 -37
  7. supervisely/nn/benchmark/base_evaluator.py +120 -0
  8. supervisely/nn/benchmark/base_visualizer.py +265 -0
  9. supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -5
  10. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +2 -2
  11. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +39 -16
  12. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +1 -1
  13. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +4 -4
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +12 -11
  15. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
  16. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +6 -6
  17. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +3 -3
  18. supervisely/nn/benchmark/{instance_segmentation_benchmark.py → instance_segmentation/benchmark.py} +9 -3
  19. supervisely/nn/benchmark/instance_segmentation/evaluator.py +58 -0
  20. supervisely/nn/benchmark/{visualization/text_templates/instance_segmentation_text.py → instance_segmentation/text_templates.py} +53 -69
  21. supervisely/nn/benchmark/instance_segmentation/visualizer.py +18 -0
  22. supervisely/nn/benchmark/object_detection/__init__.py +0 -0
  23. supervisely/nn/benchmark/object_detection/base_vis_metric.py +51 -0
  24. supervisely/nn/benchmark/{object_detection_benchmark.py → object_detection/benchmark.py} +4 -2
  25. supervisely/nn/benchmark/object_detection/evaluation_params.yaml +2 -0
  26. supervisely/nn/benchmark/{evaluation/object_detection_evaluator.py → object_detection/evaluator.py} +67 -9
  27. supervisely/nn/benchmark/{evaluation/coco → object_detection}/metric_provider.py +13 -14
  28. supervisely/nn/benchmark/{visualization/text_templates/object_detection_text.py → object_detection/text_templates.py} +49 -41
  29. supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py +48 -0
  30. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confidence_distribution.py +20 -24
  31. supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +119 -0
  32. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confusion_matrix.py +34 -22
  33. supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +129 -0
  34. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py +21 -26
  35. supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py +137 -0
  36. supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py +106 -0
  37. supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +136 -0
  38. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py +53 -49
  39. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +188 -0
  40. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +191 -0
  41. supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +116 -0
  42. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +106 -0
  43. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py +49 -0
  44. supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +72 -0
  45. supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +59 -0
  46. supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +71 -0
  47. supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +56 -0
  48. supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py +110 -0
  49. supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py +151 -0
  50. supervisely/nn/benchmark/object_detection/visualizer.py +697 -0
  51. supervisely/nn/benchmark/semantic_segmentation/__init__.py +9 -0
  52. supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py +55 -0
  53. supervisely/nn/benchmark/semantic_segmentation/benchmark.py +32 -0
  54. supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml +0 -0
  55. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +162 -0
  56. supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +153 -0
  57. supervisely/nn/benchmark/semantic_segmentation/text_templates.py +130 -0
  58. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/__init__.py +0 -0
  59. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/acknowledgement.py +15 -0
  60. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py +57 -0
  61. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +92 -0
  62. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py +84 -0
  63. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +101 -0
  64. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py +45 -0
  65. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +60 -0
  66. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py +107 -0
  67. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +112 -0
  68. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py +48 -0
  69. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py +178 -0
  70. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py +21 -0
  71. supervisely/nn/benchmark/semantic_segmentation/visualizer.py +304 -0
  72. supervisely/nn/benchmark/utils/__init__.py +12 -0
  73. supervisely/nn/benchmark/utils/detection/__init__.py +2 -0
  74. supervisely/nn/benchmark/{evaluation/coco → utils/detection}/calculate_metrics.py +6 -4
  75. supervisely/nn/benchmark/utils/detection/metric_provider.py +533 -0
  76. supervisely/nn/benchmark/{coco_utils → utils/detection}/sly2coco.py +4 -4
  77. supervisely/nn/benchmark/{coco_utils/utils.py → utils/detection/utlis.py} +11 -0
  78. supervisely/nn/benchmark/utils/semantic_segmentation/__init__.py +0 -0
  79. supervisely/nn/benchmark/utils/semantic_segmentation/calculate_metrics.py +35 -0
  80. supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +804 -0
  81. supervisely/nn/benchmark/utils/semantic_segmentation/loader.py +65 -0
  82. supervisely/nn/benchmark/utils/semantic_segmentation/utils.py +109 -0
  83. supervisely/nn/benchmark/visualization/evaluation_result.py +17 -3
  84. supervisely/nn/benchmark/visualization/vis_click_data.py +1 -1
  85. supervisely/nn/benchmark/visualization/widgets/__init__.py +3 -0
  86. supervisely/nn/benchmark/visualization/widgets/chart/chart.py +12 -4
  87. supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +35 -8
  88. supervisely/nn/benchmark/visualization/widgets/gallery/template.html +8 -4
  89. supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +1 -1
  90. supervisely/nn/benchmark/visualization/widgets/notification/notification.py +11 -7
  91. supervisely/nn/benchmark/visualization/widgets/radio_group/__init__.py +0 -0
  92. supervisely/nn/benchmark/visualization/widgets/radio_group/radio_group.py +34 -0
  93. supervisely/nn/benchmark/visualization/widgets/table/table.py +9 -3
  94. supervisely/nn/benchmark/visualization/widgets/widget.py +4 -0
  95. supervisely/project/project.py +18 -6
  96. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/METADATA +3 -1
  97. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/RECORD +104 -82
  98. supervisely/nn/benchmark/coco_utils/__init__.py +0 -2
  99. supervisely/nn/benchmark/evaluation/__init__.py +0 -3
  100. supervisely/nn/benchmark/evaluation/base_evaluator.py +0 -64
  101. supervisely/nn/benchmark/evaluation/coco/__init__.py +0 -2
  102. supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +0 -88
  103. supervisely/nn/benchmark/utils.py +0 -13
  104. supervisely/nn/benchmark/visualization/inference_speed/__init__.py +0 -19
  105. supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py +0 -161
  106. supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py +0 -28
  107. supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +0 -141
  108. supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py +0 -63
  109. supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py +0 -23
  110. supervisely/nn/benchmark/visualization/vis_metric_base.py +0 -337
  111. supervisely/nn/benchmark/visualization/vis_metrics/__init__.py +0 -67
  112. supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py +0 -55
  113. supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py +0 -93
  114. supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +0 -144
  115. supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py +0 -115
  116. supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py +0 -86
  117. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py +0 -119
  118. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py +0 -148
  119. supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py +0 -109
  120. supervisely/nn/benchmark/visualization/vis_metrics/overview.py +0 -189
  121. supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py +0 -57
  122. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py +0 -101
  123. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py +0 -46
  124. supervisely/nn/benchmark/visualization/vis_metrics/precision.py +0 -56
  125. supervisely/nn/benchmark/visualization/vis_metrics/recall.py +0 -54
  126. supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py +0 -57
  127. supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py +0 -88
  128. supervisely/nn/benchmark/visualization/vis_metrics/what_is.py +0 -23
  129. supervisely/nn/benchmark/visualization/vis_templates.py +0 -241
  130. supervisely/nn/benchmark/visualization/vis_widgets.py +0 -128
  131. supervisely/nn/benchmark/visualization/visualizer.py +0 -729
  132. /supervisely/nn/benchmark/{visualization/text_templates → instance_segmentation}/__init__.py +0 -0
  133. /supervisely/nn/benchmark/{evaluation/coco → instance_segmentation}/evaluation_params.yaml +0 -0
  134. /supervisely/nn/benchmark/{evaluation/coco → utils/detection}/metrics.py +0 -0
  135. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/LICENSE +0 -0
  136. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/WHEEL +0 -0
  137. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/entry_points.txt +0 -0
  138. {supervisely-6.73.237.dist-info → supervisely-6.73.239.dist-info}/top_level.txt +0 -0
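
Most of this release is a restructuring of supervisely/nn/benchmark: the flat evaluation/ and visualization/ modules are split into per-task subpackages (object_detection, instance_segmentation, and a new semantic_segmentation package), each with its own benchmark, evaluator, visualizer, text_templates and vis_metrics. A minimal sketch of the import-path change implied by the renames above; the module paths are taken from this diff, while the class names are assumptions and may not match the released API:

# Hypothetical before/after imports implied by the file moves above.
# Paths come from this diff; the class names are assumptions.

# 6.73.237 (flat layout):
#   from supervisely.nn.benchmark.object_detection_benchmark import ObjectDetectionBenchmark
#   from supervisely.nn.benchmark.instance_segmentation_benchmark import InstanceSegmentationBenchmark

# 6.73.239 (per-task subpackages):
#   from supervisely.nn.benchmark.object_detection.benchmark import ObjectDetectionBenchmark
#   from supervisely.nn.benchmark.instance_segmentation.benchmark import InstanceSegmentationBenchmark
#   from supervisely.nn.benchmark.semantic_segmentation.benchmark import SemanticSegmentationBenchmark  # new in 6.73.239
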
Deleted: supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py
@@ -1,55 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, List
-
- import pandas as pd
-
- from supervisely.nn.benchmark.cv_tasks import CVTask
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class ClasswiseErrorAnalysis(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         self.cv_tasks: List[CVTask] = [CVTask.SEMANTIC_SEGMENTATION.value]
-
-     def get_figure(self, widget: Widget.Chart):  # -> Optional[go.Figure]:
-         import plotly.graph_objects as go  # pylint: disable=import-error
-
-         pd.options.mode.chained_assignment = None  # TODO rm later
-
-         df = self._loader.result_df
-         df.drop(["mean"], inplace=True)
-         df = df[["IoU", "E_extent_oU", "E_boundary_oU", "E_segment_oU"]]
-         df.sort_values(by="IoU", ascending=False, inplace=True)
-         labels = list(df.index)
-         color_palette = ["cornflowerblue", "moccasin", "lightgreen", "orangered"]
-
-         fig = go.Figure()
-         for i, column in enumerate(df.columns):
-             fig.add_trace(
-                 go.Bar(
-                     name=column,
-                     y=df[column],
-                     x=labels,
-                     marker_color=color_palette[i],
-                 )
-             )
-         fig.update_yaxes(range=[0, 1])
-         fig.update_layout(
-             barmode="stack",
-             plot_bgcolor="rgba(0, 0, 0, 0)",
-             title={
-                 "text": "Classwise segmentation error analysis",
-                 "y": 0.9,
-                 "x": 0.5,
-                 "xanchor": "center",
-                 "yanchor": "top",
-             },
-         )
-         return fig
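
For context, a reworked classwise error analysis ships in this release under supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py (file 60 above). The removed widget consumed self._loader.result_df, a per-class table plus a "mean" row, with an IoU column and three error components stacked into the [0, 1] range. A tiny standalone sketch of that input shape, with made-up values:

# Minimal sketch of the dataframe shape the removed widget consumed
# (values are made up; column names are taken from the code above).
import pandas as pd

result_df = pd.DataFrame(
    {
        "IoU": [0.82, 0.64, 0.73],
        "E_extent_oU": [0.05, 0.12, 0.085],
        "E_boundary_oU": [0.08, 0.14, 0.11],
        "E_segment_oU": [0.05, 0.10, 0.075],
    },
    index=["road", "car", "mean"],  # the widget drops the "mean" row before plotting
)

per_class = result_df.drop(["mean"]).sort_values(by="IoU", ascending=False)
print(per_class)  # stacked-bar input: one bar per class, four components in [0, 1]
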
Deleted: supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py
@@ -1,93 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING
-
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class ConfidenceScore(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_confidence_score_1=Widget.Markdown(
-                 title="Confidence Score Profile",
-                 is_header=True,
-                 formats=[self._loader.vis_texts.definitions.confidence_threshold],
-             ),
-             notification_f1=Widget.Notification(
-                 formats_title=[round((self._loader.mp.m_full.get_f1_optimal_conf()[0] or 0.0), 4)]
-             ),
-             chart=Widget.Chart(),
-             markdown_confidence_score_2=Widget.Markdown(),
-             collapse_conf_score=Widget.Collapse(
-                 Schema(
-                     self._loader.vis_texts,
-                     markdown_plot_confidence_profile=Widget.Markdown(
-                         title="How to plot Confidence Profile?"
-                     ),
-                 )
-             ),
-             markdown_calibration_score_3=Widget.Markdown(),
-         )
-
-     def get_figure(self, widget: Widget):  # -> Optional[go.Figure]:
-         import plotly.express as px  # pylint: disable=import-error
-
-         color_map = {
-             "Precision": "#1f77b4",
-             "Recall": "orange",
-         }
-
-         fig = px.line(
-             self._loader.dfsp_down,
-             x="scores",
-             y=["precision", "recall", "f1"],
-             # title="Confidence Score Profile",
-             labels={"value": "Value", "variable": "Metric", "scores": "Confidence Score"},
-             width=None,
-             height=500,
-             color_discrete_map=color_map,
-         )
-         fig.update_traces(
-             hovertemplate="Confidence Score: %{x:.2f}<br>Value: %{y:.2f}<extra></extra>"
-         )
-         fig.update_layout(yaxis=dict(range=[0, 1]), xaxis=dict(range=[0, 1], tick0=0, dtick=0.1))
-
-         if self._loader.mp.f1_optimal_conf is not None and self._loader.mp.best_f1 is not None:
-             # Add vertical line for the best threshold
-             fig.add_shape(
-                 type="line",
-                 x0=self._loader.mp.f1_optimal_conf,
-                 x1=self._loader.mp.f1_optimal_conf,
-                 y0=0,
-                 y1=self._loader.mp.best_f1,
-                 line=dict(color="gray", width=2, dash="dash"),
-             )
-             fig.add_annotation(
-                 x=self._loader.mp.f1_optimal_conf,
-                 y=self._loader.mp.best_f1 + 0.04,
-                 text=f"F1-optimal threshold: {self._loader.mp.f1_optimal_conf:.2f}",
-                 showarrow=False,
-             )
-         fig.update_layout(
-             dragmode=False,
-             modebar=dict(
-                 remove=[
-                     "zoom2d",
-                     "pan2d",
-                     "select2d",
-                     "lasso2d",
-                     "zoomIn2d",
-                     "zoomOut2d",
-                     "autoScale2d",
-                     "resetScale2d",
-                 ]
-             ),
-         )
-         return fig
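
The dashed marker in this chart is the F1-optimal confidence threshold: the score at which F1 = 2PR / (P + R) peaks along the precision/recall sweep. A minimal, self-contained sketch of that selection; the column names mirror the dfsp_down frame used above, but get_f1_optimal_conf() itself is not part of this diff, so this illustrates the idea rather than the actual implementation:

# Hedged sketch: pick the confidence threshold that maximizes F1 over a
# precision/recall sweep (made-up values; not the library's implementation).
import numpy as np
import pandas as pd

dfsp_down = pd.DataFrame(
    {
        "scores": [0.1, 0.3, 0.5, 0.7, 0.9],
        "precision": [0.55, 0.68, 0.79, 0.88, 0.95],
        "recall": [0.97, 0.92, 0.83, 0.66, 0.41],
    }
)
dfsp_down["f1"] = (
    2 * dfsp_down["precision"] * dfsp_down["recall"]
    / (dfsp_down["precision"] + dfsp_down["recall"])
)

best_idx = int(np.argmax(dfsp_down["f1"].values))
f1_optimal_conf = dfsp_down["scores"][best_idx]
best_f1 = dfsp_down["f1"][best_idx]
print(f"F1-optimal threshold: {f1_optimal_conf:.2f} (F1={best_f1:.2f})")
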
Deleted: supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py
@@ -1,144 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, Optional
-
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
- from supervisely.project.project_meta import ProjectMeta
-
-
- class ExplorerGrid(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         self.clickable = True
-         self.has_diffs_view = True
-
-         filters = [{"confidence": [self.f1_optimal_conf, 1]}]
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_explorer=Widget.Markdown(title="Explore Predictions", is_header=True),
-             gallery=Widget.Gallery(filters=filters),
-         )
-
-     def _get_gallery(self, widget: Widget.Gallery, limit: Optional[int] = None) -> dict:
-         res = {}
-         api = self._loader._api
-         pred_project_id = self._loader.dt_project_info.id
-         pred_dataset = api.dataset.get_list(pred_project_id)[0]
-         project_meta = ProjectMeta.from_json(api.project.get_meta(pred_project_id))
-         pred_image_infos = api.image.get_list(dataset_id=pred_dataset.id, limit=limit)
-         pred_image_ids = [x.id for x in pred_image_infos]
-         ann_infos = api.annotation.download_batch(pred_dataset.id, pred_image_ids)
-
-         for idx, (pred_image, ann_info) in enumerate(zip(pred_image_infos, ann_infos)):
-             image_name = pred_image.name
-             image_url = pred_image.preview_url
-             widget.gallery.append(
-                 title=image_name,
-                 image_url=image_url,
-                 annotation_info=ann_info,
-                 column_index=idx % 3,
-                 project_meta=project_meta,
-                 ignore_tags_filtering=["outcome"],
-             )
-         res.update(widget.gallery.get_json_state())
-         res.update(widget.gallery.get_json_data()["content"])
-         res["layoutData"] = res.pop("annotations")
-         res["projectMeta"] = project_meta.to_json()
-
-         return res
-
-     def get_gallery(self, widget: Widget.Gallery):
-         return self._get_gallery(widget, limit=9)
-
-     def get_gallery_click_data(self, widget: Widget.Gallery):
-         res = {}
-
-         res["layoutTemplate"] = [{"skipObjectTagsFiltering": ["outcome"]}] * 3
-         click_data = res.setdefault("clickData", {})
-         explore = click_data.setdefault("explore", {})
-         explore["title"] = "Explore all predictions"
-         images_ids = explore.setdefault("imagesIds", [])
-
-         images_ids.extend([cd.pred_image_info.id for cd in self._loader.comparison_data.values()])
-
-         return res
-
-     def get_diff_gallery_data(self, widget: Widget.Gallery) -> Optional[dict]:
-         res = {}
-
-         res["layoutTemplate"] = [
-             {"skipObjectTagsFiltering": True, "columnTitle": "Ground Truth"},
-             {"skipObjectTagsFiltering": ["outcome"], "columnTitle": "Prediction"},
-             {"skipObjectTagsFiltering": ["confidence"], "columnTitle": "Difference"},
-         ]
-
-         click_data = res.setdefault("clickData", {})
-
-         default_filters = [
-             {"type": "tag", "tagId": "confidence", "value": [self.f1_optimal_conf, 1]},
-             # {"type": "tag", "tagId": "outcome", "value": "FP"},
-         ]
-         for img_comparison_data in self._loader.comparison_data.values():
-             gt = img_comparison_data.gt_image_info
-             pred = img_comparison_data.pred_image_info
-             diff = img_comparison_data.diff_image_info
-             assert gt.name == pred.name == diff.name
-             key = click_data.setdefault(str(pred.id), {})
-             key["imagesIds"] = [gt.id, pred.id, diff.id]
-             key["filters"] = default_filters
-             key["title"] = f"Image: {pred.name}"
-             image_id = pred.id
-             ann_json = img_comparison_data.pred_annotation.to_json()
-             assert image_id == pred.id
-             object_bindings = []
-             for obj in ann_json["objects"]:
-                 for tag in obj["tags"]:
-                     if tag["name"] == "matched_gt_id":
-                         object_bindings.append(
-                             [
-                                 {
-                                     "id": obj["id"],
-                                     "annotationKey": image_id,
-                                 },
-                                 {
-                                     "id": int(tag["value"]),
-                                     "annotationKey": gt.id,
-                                 },
-                             ]
-                         )
-
-             image_id = diff.id
-             ann_json = img_comparison_data.diff_annotation.to_json()
-             assert image_id == diff.id
-             for obj in ann_json["objects"]:
-                 for tag in obj["tags"]:
-                     if tag["name"] == "matched_gt_id":
-                         object_bindings.append(
-                             [
-                                 {
-                                     "id": obj["id"],
-                                     "annotationKey": image_id,
-                                 },
-                                 {
-                                     "id": int(tag["value"]),
-                                     "annotationKey": pred.id,
-                                 },
-                             ]
-                         )
-             key["objectsBindings"] = object_bindings
-
-         return res
-
-     # def get_gallery_modal(self, widget: Widget.Gallery):
-     #     res = self.get_gallery(widget)
-
-     #     res.pop("layout")
-     #     res.pop("layoutData")
-
-     #     return res
Deleted: supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py
@@ -1,115 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, Optional
-
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class FrequentlyConfused(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-
-         self.clickable: bool = True
-         self.switchable: bool = True
-         self._keypair_sep: str = " - "
-         df = self._loader.mp.frequently_confused()
-         if df.empty:
-             self.schema = Schema(
-                 self._loader.vis_texts,
-                 empty=Widget.Markdown(
-                     title="Frequently Confused Classes",
-                     is_header=True,
-                     formats=[
-                         "Frequently Confused Classes",
-                         "No frequently confused class pairs found",
-                     ],
-                 ),
-             )
-             self.empty = True
-             return
-
-         pair = df["category_pair"][0]
-         prob = df["probability"][0]
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_frequently_confused=Widget.Markdown(
-                 title="Frequently Confused Classes",
-                 is_header=True,
-                 formats=[
-                     pair[0],
-                     pair[1],
-                     prob.round(2),
-                     pair[0],
-                     pair[1],
-                     (prob * 100).round(),
-                     pair[0],
-                     pair[1],
-                     pair[1],
-                     pair[0],
-                 ],
-             ),
-             chart_01=Widget.Chart(switch_key="probability"),
-             chart_02=Widget.Chart(switch_key="count"),
-         )
-
-     def get_figure(self, widget: Widget.Chart):  # -> Optional[Tuple[go.Figure]]:
-         if self.empty:
-             return
-
-         import plotly.graph_objects as go  # pylint: disable=import-error
-
-         # Frequency of confusion as bar chart
-         confused_df = self._loader.mp.frequently_confused()
-         confused_name_pairs = confused_df["category_pair"]
-         x_labels = [f"{pair[0]} - {pair[1]}" for pair in confused_name_pairs]
-         y_labels = confused_df[widget.switch_key]
-
-         fig = go.Figure()
-         fig.add_trace(
-             go.Bar(x=x_labels, y=y_labels, marker=dict(color=y_labels, colorscale="Reds"))
-         )
-         fig.update_layout(
-             # title="Frequently confused class pairs",
-             xaxis_title="Class Pair",
-             yaxis_title=y_labels.name.capitalize(),
-         )
-         fig.update_traces(text=y_labels.round(2))
-         fig.update_traces(
-             hovertemplate="Class Pair: %{x}<br>"
-             + y_labels.name.capitalize()
-             + ": %{y:.2f}<extra></extra>"
-         )
-         return fig
-
-     def get_click_data(self, widget: Widget.Chart) -> Optional[dict]:
-         if not self.clickable or self.empty:
-             return
-         res = dict(projectMeta=self._loader.dt_project_meta.to_json())
-
-         res["layoutTemplate"] = [None, None, None]
-         res["clickData"] = {}
-
-         for keypair, v in self._loader.click_data.frequently_confused.items():
-             subkey1, subkey2 = keypair
-             key = subkey1 + self._keypair_sep + subkey2
-             res["clickData"][key] = {}
-             res["clickData"][key]["imagesIds"] = []
-             res["clickData"][key]["title"] = f"Confused classes: {subkey1} - {subkey2}"
-
-             img_ids = set()
-             obj_ids = set()
-             for x in v:
-                 img_ids.add(x["dt_img_id"])
-                 obj_ids.add(x["dt_obj_id"])
-
-             res["clickData"][key]["imagesIds"] = list(img_ids)
-             res["clickData"][key]["filters"] = [
-                 {"type": "specific_objects", "tagId": None, "value": list(obj_ids)},
-             ]
-
-         return res
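
mp.frequently_confused() supplies a table of class pairs with count and probability columns, which the removed widget plotted as bars (its replacement is supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py, file 35 above). A rough sketch of how such a pair table can be derived from a confusion matrix; this illustrates the idea, not the metric provider's implementation:

# Illustrative only: derive "frequently confused" class pairs from a confusion
# matrix (made-up numbers; not the library's frequently_confused() code).
import numpy as np
import pandas as pd

classes = ["cat", "dog", "person"]
# cm[i, j] = ground-truth class i predicted as class j
cm = np.array(
    [
        [50, 9, 1],
        [12, 40, 2],
        [0, 1, 70],
    ]
)

rows = []
for i in range(len(classes)):
    for j in range(i + 1, len(classes)):
        count = cm[i, j] + cm[j, i]        # confusions in either direction
        total = cm[i].sum() + cm[j].sum()  # all objects of the two classes
        if count:
            rows.append(
                {
                    "category_pair": (classes[i], classes[j]),
                    "count": int(count),
                    "probability": count / total,
                }
            )

df = pd.DataFrame(rows).sort_values("probability", ascending=False).reset_index(drop=True)
print(df.head())
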
Deleted: supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py
@@ -1,86 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING
-
- from supervisely.nn.benchmark.cv_tasks import CVTask
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class IOUDistribution(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         title = "Localization Accuracy (IoU)"
-         if self._loader.cv_task in [CVTask.INSTANCE_SEGMENTATION, CVTask.SEMANTIC_SEGMENTATION]:
-             title = "Mask Accuracy (IoU)"
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_localization_accuracy=Widget.Markdown(
-                 title=title,
-                 is_header=True,
-                 formats=[self._loader.vis_texts.definitions.iou_score],
-             ),
-             markdown_iou_distribution=Widget.Markdown(
-                 title="IoU Distribution",
-                 is_header=True,
-                 formats=[self._loader.vis_texts.definitions.iou_score],
-             ),
-             notification_avg_iou=Widget.Notification(
-                 formats_title=[self._loader.base_metrics()["iou"].round(2)]
-             ),
-             chart=Widget.Chart(),
-             collapse_iou=Widget.Collapse(
-                 Schema(
-                     self._loader.vis_texts,
-                     markdown_iou_calculation=Widget.Markdown(title="How IoU is calculated?"),
-                 )
-             ),
-         )
-
-     def get_figure(self, widget: Widget):  # -> Optional[go.Figure]:
-         import plotly.graph_objects as go  # pylint: disable=import-error
-
-         fig = go.Figure()
-         nbins = 40
-         fig.add_trace(go.Histogram(x=self._loader.mp.ious, nbinsx=nbins))
-         fig.update_layout(
-             # title="IoU Distribution",
-             xaxis_title="IoU",
-             yaxis_title="Count",
-             width=600,
-             height=500,
-         )
-
-         # Add annotation for mean IoU as vertical line
-         mean_iou = self._loader.mp.ious.mean()
-         y1 = len(self._loader.mp.ious) // nbins
-         fig.add_shape(
-             type="line",
-             x0=mean_iou,
-             x1=mean_iou,
-             y0=0,
-             y1=y1,
-             line=dict(color="orange", width=2, dash="dash"),
-         )
-         fig.update_traces(hovertemplate="IoU: %{x:.2f}<br>Count: %{y}<extra></extra>")
-         fig.add_annotation(x=mean_iou, y=y1, text=f"Mean IoU: {mean_iou:.2f}", showarrow=False)
-         fig.update_layout(
-             dragmode=False,
-             modebar=dict(
-                 remove=[
-                     "zoom2d",
-                     "pan2d",
-                     "select2d",
-                     "lasso2d",
-                     "zoomIn2d",
-                     "zoomOut2d",
-                     "autoScale2d",
-                     "resetScale2d",
-                 ]
-             ),
-         )
-         return fig
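
mp.ious holds one IoU value per matched prediction/ground-truth pair, where IoU = area(A ∩ B) / area(A ∪ B). A small standalone example for two axis-aligned boxes (illustrative; not the library's geometry code):

# Illustrative IoU for two axis-aligned boxes given as (left, top, right, bottom).
# Not the supervisely geometry implementation, just the standard formula.
def box_iou(a, b):
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / union if union else 0.0

print(box_iou((0, 0, 10, 10), (5, 5, 15, 15)))  # 25 / 175 ≈ 0.143
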
Deleted: supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py
@@ -1,119 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, Optional
-
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class OutcomeCounts(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-
-         self.clickable: bool = True
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_outcome_counts=Widget.Markdown(
-                 title="Outcome Counts",
-                 is_header=True,
-                 formats=[
-                     self._loader.vis_texts.definitions.true_positives,
-                     self._loader.vis_texts.definitions.false_positives,
-                     self._loader.vis_texts.definitions.false_negatives,
-                 ],
-             ),
-             chart=Widget.Chart(),
-         )
-
-     def get_figure(self, widget: Widget.Chart):  # -> Optional[go.Figure]:
-         import plotly.graph_objects as go  # pylint: disable=import-error
-
-         fig = go.Figure()
-         fig.add_trace(
-             go.Bar(
-                 x=[self._loader.mp.TP_count],
-                 y=["Outcome"],
-                 name="TP",
-                 orientation="h",
-                 marker=dict(color="#8ACAA1"),
-                 hovertemplate="TP: %{x} objects<extra></extra>",
-             )
-         )
-         fig.add_trace(
-             go.Bar(
-                 x=[self._loader.mp.FN_count],
-                 y=["Outcome"],
-                 name="FN",
-                 orientation="h",
-                 marker=dict(color="#dd3f3f"),
-                 hovertemplate="FN: %{x} objects<extra></extra>",
-             )
-         )
-         fig.add_trace(
-             go.Bar(
-                 x=[self._loader.mp.FP_count],
-                 y=["Outcome"],
-                 name="FP",
-                 orientation="h",
-                 marker=dict(color="#F7ADAA"),
-                 hovertemplate="FP: %{x} objects<extra></extra>",
-             )
-         )
-         fig.update_layout(
-             barmode="stack",
-             width=600,
-             height=300,
-         )
-         fig.update_xaxes(title_text="Count (objects)")
-         fig.update_yaxes(tickangle=-90)
-
-         fig.update_layout(
-             dragmode=False,
-             modebar=dict(
-                 remove=[
-                     "zoom2d",
-                     "pan2d",
-                     "select2d",
-                     "lasso2d",
-                     "zoomIn2d",
-                     "zoomOut2d",
-                     "autoScale2d",
-                     "resetScale2d",
-                 ]
-             ),
-         )
-         return fig
-
-     def get_click_data(self, widget: Widget.Chart) -> Optional[dict]:
-         if not self.clickable:
-             return
-         res = {}
-
-         res["layoutTemplate"] = [None, None, None]
-         res["clickData"] = {}
-         for outcome, matches_data in self._loader.click_data.outcome_counts.items():
-             res["clickData"][outcome] = {}
-             res["clickData"][outcome]["imagesIds"] = []
-
-             img_ids = set()
-             for match_data in matches_data:
-                 img_comparison_data = self._loader.comparison_data[match_data["gt_img_id"]]
-                 if outcome == "FN":
-                     img_ids.add(img_comparison_data.diff_image_info.id)
-                 else:
-                     img_ids.add(img_comparison_data.pred_image_info.id)
-
-             res["clickData"][outcome][
-                 "title"
-             ] = f"{outcome}: {len(matches_data)} object{'s' if len(matches_data) > 1 else ''}"
-             res["clickData"][outcome]["imagesIds"] = list(img_ids)
-             res["clickData"][outcome]["filters"] = [
-                 {"type": "tag", "tagId": "confidence", "value": [0, 1]},
-                 {"type": "tag", "tagId": "outcome", "value": outcome},
-             ]
-
-         return res
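
TP_count, FP_count and FN_count are object-level outcome totals at the evaluation threshold, and they are the ingredients of the headline metrics: precision = TP / (TP + FP), recall = TP / (TP + FN), F1 = 2PR / (P + R). A quick worked example with made-up counts:

# Made-up counts, just to show how the outcome totals relate to the key metrics.
tp, fp, fn = 820, 95, 140

precision = tp / (tp + fp)  # 820 / 915 ≈ 0.896
recall = tp / (tp + fn)     # 820 / 960 ≈ 0.854
f1 = 2 * precision * recall / (precision + recall)  # ≈ 0.875

print(f"precision={precision:.3f} recall={recall:.3f} f1={f1:.3f}")
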