supervisely-6.73.238-py3-none-any.whl → supervisely-6.73.240-py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (138)
  1. supervisely/annotation/annotation.py +2 -2
  2. supervisely/api/entity_annotation/tag_api.py +11 -4
  3. supervisely/api/file_api.py +17 -3
  4. supervisely/nn/__init__.py +1 -0
  5. supervisely/nn/benchmark/__init__.py +14 -2
  6. supervisely/nn/benchmark/base_benchmark.py +84 -37
  7. supervisely/nn/benchmark/base_evaluator.py +120 -0
  8. supervisely/nn/benchmark/base_visualizer.py +265 -0
  9. supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -5
  10. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +2 -2
  11. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +39 -16
  12. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +1 -1
  13. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +4 -4
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +12 -11
  15. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
  16. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +6 -6
  17. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +3 -3
  18. supervisely/nn/benchmark/{instance_segmentation_benchmark.py → instance_segmentation/benchmark.py} +9 -3
  19. supervisely/nn/benchmark/instance_segmentation/evaluator.py +58 -0
  20. supervisely/nn/benchmark/{visualization/text_templates/instance_segmentation_text.py → instance_segmentation/text_templates.py} +53 -69
  21. supervisely/nn/benchmark/instance_segmentation/visualizer.py +18 -0
  22. supervisely/nn/benchmark/object_detection/__init__.py +0 -0
  23. supervisely/nn/benchmark/object_detection/base_vis_metric.py +51 -0
  24. supervisely/nn/benchmark/{object_detection_benchmark.py → object_detection/benchmark.py} +4 -2
  25. supervisely/nn/benchmark/object_detection/evaluation_params.yaml +2 -0
  26. supervisely/nn/benchmark/{evaluation/object_detection_evaluator.py → object_detection/evaluator.py} +67 -9
  27. supervisely/nn/benchmark/{evaluation/coco → object_detection}/metric_provider.py +13 -14
  28. supervisely/nn/benchmark/{visualization/text_templates/object_detection_text.py → object_detection/text_templates.py} +49 -41
  29. supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py +48 -0
  30. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confidence_distribution.py +20 -24
  31. supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +119 -0
  32. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confusion_matrix.py +34 -22
  33. supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +129 -0
  34. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py +21 -26
  35. supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py +137 -0
  36. supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py +106 -0
  37. supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +136 -0
  38. supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py +53 -49
  39. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +188 -0
  40. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +191 -0
  41. supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +116 -0
  42. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +106 -0
  43. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py +49 -0
  44. supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +72 -0
  45. supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +59 -0
  46. supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +71 -0
  47. supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +56 -0
  48. supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py +110 -0
  49. supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py +151 -0
  50. supervisely/nn/benchmark/object_detection/visualizer.py +697 -0
  51. supervisely/nn/benchmark/semantic_segmentation/__init__.py +9 -0
  52. supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py +55 -0
  53. supervisely/nn/benchmark/semantic_segmentation/benchmark.py +32 -0
  54. supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml +0 -0
  55. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +162 -0
  56. supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +153 -0
  57. supervisely/nn/benchmark/semantic_segmentation/text_templates.py +130 -0
  58. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/__init__.py +0 -0
  59. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/acknowledgement.py +15 -0
  60. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py +57 -0
  61. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +92 -0
  62. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py +84 -0
  63. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +101 -0
  64. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py +45 -0
  65. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +60 -0
  66. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py +107 -0
  67. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +112 -0
  68. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py +48 -0
  69. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py +178 -0
  70. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py +21 -0
  71. supervisely/nn/benchmark/semantic_segmentation/visualizer.py +304 -0
  72. supervisely/nn/benchmark/utils/__init__.py +12 -0
  73. supervisely/nn/benchmark/utils/detection/__init__.py +2 -0
  74. supervisely/nn/benchmark/{evaluation/coco → utils/detection}/calculate_metrics.py +6 -4
  75. supervisely/nn/benchmark/utils/detection/metric_provider.py +533 -0
  76. supervisely/nn/benchmark/{coco_utils → utils/detection}/sly2coco.py +4 -4
  77. supervisely/nn/benchmark/{coco_utils/utils.py → utils/detection/utlis.py} +11 -0
  78. supervisely/nn/benchmark/utils/semantic_segmentation/__init__.py +0 -0
  79. supervisely/nn/benchmark/utils/semantic_segmentation/calculate_metrics.py +35 -0
  80. supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +804 -0
  81. supervisely/nn/benchmark/utils/semantic_segmentation/loader.py +65 -0
  82. supervisely/nn/benchmark/utils/semantic_segmentation/utils.py +109 -0
  83. supervisely/nn/benchmark/visualization/evaluation_result.py +17 -3
  84. supervisely/nn/benchmark/visualization/vis_click_data.py +1 -1
  85. supervisely/nn/benchmark/visualization/widgets/__init__.py +3 -0
  86. supervisely/nn/benchmark/visualization/widgets/chart/chart.py +12 -4
  87. supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +35 -8
  88. supervisely/nn/benchmark/visualization/widgets/gallery/template.html +8 -4
  89. supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +1 -1
  90. supervisely/nn/benchmark/visualization/widgets/notification/notification.py +11 -7
  91. supervisely/nn/benchmark/visualization/widgets/radio_group/__init__.py +0 -0
  92. supervisely/nn/benchmark/visualization/widgets/radio_group/radio_group.py +34 -0
  93. supervisely/nn/benchmark/visualization/widgets/table/table.py +9 -3
  94. supervisely/nn/benchmark/visualization/widgets/widget.py +4 -0
  95. supervisely/project/project.py +18 -6
  96. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/METADATA +3 -1
  97. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/RECORD +104 -82
  98. supervisely/nn/benchmark/coco_utils/__init__.py +0 -2
  99. supervisely/nn/benchmark/evaluation/__init__.py +0 -3
  100. supervisely/nn/benchmark/evaluation/base_evaluator.py +0 -64
  101. supervisely/nn/benchmark/evaluation/coco/__init__.py +0 -2
  102. supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +0 -88
  103. supervisely/nn/benchmark/utils.py +0 -13
  104. supervisely/nn/benchmark/visualization/inference_speed/__init__.py +0 -19
  105. supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py +0 -161
  106. supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py +0 -28
  107. supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +0 -141
  108. supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py +0 -63
  109. supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py +0 -23
  110. supervisely/nn/benchmark/visualization/vis_metric_base.py +0 -337
  111. supervisely/nn/benchmark/visualization/vis_metrics/__init__.py +0 -67
  112. supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py +0 -55
  113. supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py +0 -93
  114. supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +0 -144
  115. supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py +0 -115
  116. supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py +0 -86
  117. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py +0 -119
  118. supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py +0 -148
  119. supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py +0 -109
  120. supervisely/nn/benchmark/visualization/vis_metrics/overview.py +0 -189
  121. supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py +0 -57
  122. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py +0 -101
  123. supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py +0 -46
  124. supervisely/nn/benchmark/visualization/vis_metrics/precision.py +0 -56
  125. supervisely/nn/benchmark/visualization/vis_metrics/recall.py +0 -54
  126. supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py +0 -57
  127. supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py +0 -88
  128. supervisely/nn/benchmark/visualization/vis_metrics/what_is.py +0 -23
  129. supervisely/nn/benchmark/visualization/vis_templates.py +0 -241
  130. supervisely/nn/benchmark/visualization/vis_widgets.py +0 -128
  131. supervisely/nn/benchmark/visualization/visualizer.py +0 -729
  132. /supervisely/nn/benchmark/{visualization/text_templates → instance_segmentation}/__init__.py +0 -0
  133. /supervisely/nn/benchmark/{evaluation/coco → instance_segmentation}/evaluation_params.yaml +0 -0
  134. /supervisely/nn/benchmark/{evaluation/coco → utils/detection}/metrics.py +0 -0
  135. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/LICENSE +0 -0
  136. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/WHEEL +0 -0
  137. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/entry_points.txt +0 -0
  138. {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/top_level.txt +0 -0
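
Most of the changes reorganize supervisely.nn.benchmark from the flat evaluation/visualization layout into per-task subpackages (object_detection, instance_segmentation, the new semantic_segmentation, plus shared utils); the hunks below show the legacy visualization modules that were removed. For code that imported the old module paths directly, a minimal migration sketch follows. It assumes the moved modules keep their original class names (ObjectDetectionBenchmark, ObjectDetectionEvaluator, InstanceSegmentationBenchmark) and that SemanticSegmentationBenchmark is the entry point of the new subpackage; verify against the installed 6.73.240 package before relying on it.

    # Old (6.73.238) import paths:
    # from supervisely.nn.benchmark.object_detection_benchmark import ObjectDetectionBenchmark
    # from supervisely.nn.benchmark.evaluation.object_detection_evaluator import ObjectDetectionEvaluator

    # New (6.73.240) import paths, following the file moves listed above:
    from supervisely.nn.benchmark.object_detection.benchmark import ObjectDetectionBenchmark
    from supervisely.nn.benchmark.object_detection.evaluator import ObjectDetectionEvaluator
    from supervisely.nn.benchmark.instance_segmentation.benchmark import InstanceSegmentationBenchmark
    from supervisely.nn.benchmark.semantic_segmentation.benchmark import SemanticSegmentationBenchmark  # added in this release
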
supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py
@@ -1,148 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, Optional
-
- import numpy as np
- import pandas as pd
-
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class PerClassOutcomeCounts(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         self.clickable: bool = True
-         self.switchable: bool = True
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_class_outcome_counts_1=Widget.Markdown(
-                 title="Outcome Counts by Class",
-                 is_header=True,
-                 formats=[
-                     self._loader.vis_texts.definitions.true_positives,
-                     self._loader.vis_texts.definitions.false_positives,
-                     self._loader.vis_texts.definitions.false_negatives,
-                 ],
-             ),
-             markdown_class_outcome_counts_2=Widget.Markdown(
-                 formats=[self._loader.vis_texts.definitions.f1_score]
-             ),
-             collapse_perclass_outcome=Widget.Collapse(
-                 Schema(
-                     self._loader.vis_texts,
-                     markdown_normalization=Widget.Markdown(title="Normalization"),
-                 )
-             ),
-             chart_01=Widget.Chart(switch_key="normalized"),
-             chart_02=Widget.Chart(switch_key="absolute"),
-         )
-
-     def get_figure(self, widget: Widget.Chart):  # -> Optional[go.Figure]:
-         import plotly.express as px  # pylint: disable=import-error
-
-         # Per-class Counts
-         iou_thres = 0
-
-         tp = self._loader.mp.true_positives[:, iou_thres]
-         fp = self._loader.mp.false_positives[:, iou_thres]
-         fn = self._loader.mp.false_negatives[:, iou_thres]
-
-         # normalize
-         support = tp + fn
-         with np.errstate(invalid="ignore", divide="ignore"):
-             tp_rel = tp / support
-             fp_rel = fp / support
-             fn_rel = fn / support
-
-             # sort by f1
-             sort_scores = 2 * tp / (2 * tp + fp + fn)
-
-         K = len(self._loader.mp.cat_names)
-         sort_indices = np.argsort(sort_scores)
-         cat_names_sorted = [self._loader.mp.cat_names[i] for i in sort_indices]
-         tp_rel, fn_rel, fp_rel = tp_rel[sort_indices], fn_rel[sort_indices], fp_rel[sort_indices]
-
-         objects_count = np.concatenate([tp[sort_indices], fn[sort_indices], fp[sort_indices]])
-         data = {
-             "Type": ["TP"] * K + ["FN"] * K + ["FP"] * K,
-             "category": cat_names_sorted * 3,
-         }
-         y_label = ""
-         if widget.switch_key == "normalized":
-             y_label = "Objects Fraction"
-             # Stacked per-class counts
-             data["count"] = np.concatenate([tp_rel, fn_rel, fp_rel])
-         elif widget.switch_key == "absolute":
-             y_label = "Objects Count"
-             data["count"] = objects_count
-
-         df = pd.DataFrame(data)
-
-         color_map = {"TP": "#8ACAA1", "FN": "#dd3f3f", "FP": "#F7ADAA"}
-         fig = px.bar(
-             df,
-             x="category",
-             y="count",
-             color="Type",
-             # title="Per-class Outcome Counts",
-             height=500,
-             width=1000,
-             labels={"count": y_label, "category": "Class"},
-             color_discrete_map=color_map,
-         )
-         xaxis_title = fig.layout.xaxis.title.text
-         yaxis_title = fig.layout.yaxis.title.text
-         if widget.switch_key == "normalized":
-
-             fig.update_traces(
-                 hovertemplate="Type=%{fullData.name} <br>"
-                 + xaxis_title
-                 + "=%{x}<br>"
-                 + yaxis_title
-                 + "=%{y:.2f}<extra></extra>"
-                 # "Images count=%{y:.2f}<extra></extra>"
-             )
-         elif widget.switch_key == "absolute":
-             fig.update_traces(
-                 hovertemplate="Type=%{fullData.name} <br>"
-                 + xaxis_title
-                 + "=%{x}<br>"
-                 + yaxis_title
-                 + "=%{y}<extra></extra>",
-             )
-         return fig
-
-     def get_click_data(self, widget: Widget.Chart) -> Optional[dict]:
-         if not self.clickable:
-             return
-         res = {}
-         res["layoutTemplate"] = [None, None, None]
-
-         res["clickData"] = {}
-         for class_name, v1 in self._loader.click_data.outcome_counts_by_class.items():
-             for outcome, matches_data in v1.items():
-                 key = class_name + self._keypair_sep + outcome
-                 res["clickData"][key] = {}
-                 res["clickData"][key]["imagesIds"] = []
-                 res["clickData"][key][
-                     "title"
-                 ] = f"Images with objects of class '{class_name}' and outcome '{outcome}'"
-
-                 img_ids = set()
-                 for match_data in matches_data:
-                     img_comparison_data = self._loader.comparison_data[match_data["gt_img_id"]]
-                     if outcome == "FN":
-                         img_ids.add(img_comparison_data.diff_image_info.id)
-                     else:
-                         img_ids.add(img_comparison_data.pred_image_info.id)
-                 res["clickData"][key]["imagesIds"] = list(img_ids)
-                 res["clickData"][key]["filters"] = [
-                     {"type": "tag", "tagId": "confidence", "value": [0, 1]},
-                     {"type": "tag", "tagId": "outcome", "value": outcome},
-                 ]
-         return res
supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py
@@ -1,109 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, List
-
- from supervisely.nn.benchmark.cv_tasks import CVTask
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class OverallErrorAnalysis(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         self.cv_tasks: List[CVTask] = [CVTask.SEMANTIC_SEGMENTATION.value]
-
-     def get_figure(self, widget: Widget.Chart):  # -> Optional[go.Figure]:
-         import plotly.graph_objects as go  # pylint: disable=import-error
-         from plotly.subplots import make_subplots  # pylint: disable=import-error
-
-         fig = make_subplots(
-             rows=1,
-             cols=3,
-             subplot_titles=(
-                 "Basic segmentation metrics",
-                 "Intersection & Error over Union",
-                 "Renormalized Error over Union",
-             ),
-             specs=[[{"type": "polar"}, {"type": "domain"}, {"type": "xy"}]],
-         )
-
-         # first subplot
-         categories = [
-             "mPixel accuracy",
-             "mPrecision",
-             "mRecall",
-             "mF1-score",
-             "mIoU",
-             "mBoundaryIoU",
-             "mPixel accuracy",
-         ]
-         values = [64, 60.4, 52, 51.4, 37.9, 20.5, 64]
-         trace_1 = go.Scatterpolar(
-             mode="lines+text",
-             r=values,
-             theta=categories,
-             fill="toself",
-             fillcolor="cornflowerblue",
-             line_color="blue",
-             opacity=0.6,
-             text=[64, 60.4, 52, 51.4, 37.9, 20.5, 64],
-             textposition=[
-                 "bottom right",
-                 "top center",
-                 "top center",
-                 "middle left",
-                 "bottom center",
-                 "bottom right",
-                 "bottom right",
-             ],
-             textfont=dict(color="blue"),
-         )
-         fig.add_trace(trace_1, row=1, col=1)
-
-         # second subplot
-         labels = ["mIoU", "mBoundaryEoU", "mExtentEoU", "mSegmentEoU"]
-         values = [37.9, 13.1, 25.8, 23.2]
-         trace_2 = go.Pie(
-             labels=labels,
-             values=values,
-             hole=0.5,
-             textposition="outside",
-             textinfo="percent+label",
-             marker=dict(colors=["cornflowerblue", "moccasin", "lightgreen", "orangered"]),
-         )
-         fig.add_trace(trace_2, row=1, col=2)
-
-         # third subplot
-         labels = ["boundary", "extent", "segment"]
-         values = [28.9, 37.6, 23.2]
-         trace_3 = go.Bar(
-             x=labels,
-             y=values,
-             orientation="v",
-             text=values,
-             width=[0.5, 0.5, 0.5],
-             textposition="outside",
-             marker_color=["moccasin", "lightgreen", "orangered"],
-         )
-         fig.add_trace(trace_3, row=1, col=3)
-
-         fig.update_layout(
-             height=400,
-             width=1200,
-             polar=dict(
-                 radialaxis=dict(visible=True, showline=False, showticklabels=False, range=[0, 100])
-             ),
-             showlegend=False,
-             plot_bgcolor="rgba(0, 0, 0, 0)",
-             yaxis=dict(showticklabels=False),
-             yaxis_range=[0, int(max(values)) + 4],
-         )
-         fig.layout.annotations[0].update(y=1.2)
-         fig.layout.annotations[1].update(y=1.2)
-         fig.layout.annotations[2].update(y=1.2)
-
-         return fig
supervisely/nn/benchmark/visualization/vis_metrics/overview.py
@@ -1,189 +0,0 @@
- from __future__ import annotations
-
- import datetime
- from typing import TYPE_CHECKING
-
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class Overview(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         self._is_overview = True
-         info = loader.inference_info or {}
-         url = info.get("checkpoint_url")
-         link_text = info.get("custom_checkpoint_path")
-         if link_text is None:
-             link_text = url
-         if link_text is not None:
-             link_text = link_text.replace("_", "\_")
-
-         # Note about validation dataset
-         classes_str, note_about_val_dataset, train_session = self.get_overview_info()
-
-         checkpoint_name = info.get("deploy_params", {}).get("checkpoint_name", "")
-         me = self._loader._api.user.get_my_info().login
-         current_date = datetime.datetime.now().strftime("%d %B %Y, %H:%M")
-         model_name = info.get("model_name") or "Custom"
-
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_header=Widget.Markdown(
-                 title="Header",
-                 is_header=False,
-                 formats=[checkpoint_name, me, current_date],  # Title
-             ),
-             markdown_overview=Widget.Markdown(
-                 title="Overview",
-                 is_header=True,
-                 formats=[
-                     model_name.replace("_", "\_"),
-                     checkpoint_name.replace("_", "\_"),
-                     info.get("architecture"),
-                     info.get("task_type"),
-                     info.get("runtime"),
-                     url,
-                     link_text,
-                     self._loader.gt_project_info.id,
-                     self._loader.gt_project_info.name,
-                     classes_str,
-                     note_about_val_dataset,
-                     train_session,
-                     self._loader.docs_link,
-                 ],
-             ),
-             markdown_key_metrics=Widget.Markdown(
-                 title="Key Metrics",
-                 is_header=True,
-                 formats=[
-                     self._loader.vis_texts.definitions.average_precision,
-                     self._loader.vis_texts.definitions.confidence_threshold,
-                     self._loader.vis_texts.definitions.confidence_score,
-                 ],
-             ),
-             table_key_metrics=Widget.Table(),
-             chart=Widget.Chart(),
-         )
-
-     def get_figure(self, widget: Widget.Chart):  # -> Optional[go.Figure]
-         import plotly.graph_objects as go  # pylint: disable=import-error
-
-         # Overall Metrics
-         base_metrics = self._loader.mp.base_metrics()
-         r = list(base_metrics.values())
-         theta = [self._loader.mp.metric_names[k] for k in base_metrics.keys()]
-         fig = go.Figure()
-         fig.add_trace(
-             go.Scatterpolar(
-                 r=r + [r[0]],
-                 theta=theta + [theta[0]],
-                 fill="toself",
-                 name="Overall Metrics",
-                 hovertemplate="%{theta}: %{r:.2f}<extra></extra>",
-             )
-         )
-         fig.update_layout(
-             polar=dict(
-                 radialaxis=dict(
-                     range=[0.0, 1.0],
-                     ticks="outside",
-                 ),
-                 angularaxis=dict(rotation=90, direction="clockwise"),
-             ),
-             dragmode=False,
-             # title="Overall Metrics",
-             # width=700,
-             # height=500,
-             # autosize=False,
-             margin=dict(l=25, r=25, t=25, b=25),
-         )
-         fig.update_layout(
-             modebar=dict(
-                 remove=[
-                     "zoom2d",
-                     "pan2d",
-                     "select2d",
-                     "lasso2d",
-                     "zoomIn2d",
-                     "zoomOut2d",
-                     "autoScale2d",
-                     "resetScale2d",
-                 ]
-             )
-         )
-         return fig
-
-     def get_overview_info(self):
-         classes_cnt = len(self._loader._benchmark.classes_whitelist or [])
-         classes_str = "classes" if classes_cnt > 1 else "class"
-         classes_str = f"{classes_cnt} {classes_str}"
-
-         train_session, images_str = "", ""
-         gt_project_id = self._loader.gt_project_info.id
-         gt_images_ids = self._loader._benchmark.gt_images_ids
-         gt_dataset_ids = self._loader._benchmark.gt_dataset_ids
-         train_info = self._loader._benchmark.train_info
-         if gt_images_ids is not None:
-             val_imgs_cnt = len(gt_images_ids)
-         elif gt_dataset_ids is not None:
-             datasets = [self._loader._api.dataset.get_info_by_id(ds) for ds in gt_dataset_ids]
-             val_imgs_cnt = sum(ds.items_count for ds in datasets)
-         else:
-             val_imgs_cnt = self._loader.gt_project_info.items_count
-
-         if train_info:
-             train_task_id = train_info.get("app_session_id")
-             if train_task_id:
-                 task_info = self._loader._api.task.get_info_by_id(int(train_task_id))
-                 app_id = task_info["meta"]["app"]["id"]
-                 train_session = f'- **Training dashboard**: <a href="/apps/{app_id}/sessions/{train_task_id}" target="_blank">open</a>'
-
-             train_imgs_cnt = train_info.get("images_count")
-             images_str = f", {train_imgs_cnt} images in train, {val_imgs_cnt} images in validation"
-
-         if gt_images_ids is not None:
-             images_str += f". Evaluated using subset - {val_imgs_cnt} images"
-         elif gt_dataset_ids is not None:
-             links = [
-                 f'<a href="/projects/{gt_project_id}/datasets/{ds.id}" target="_blank">{ds.name}</a>'
-                 for ds in datasets
-             ]
-             images_str += (
-                 f". Evaluated on the dataset{'s' if len(links) > 1 else ''}: {', '.join(links)}"
-             )
-         else:
-             images_str += f". Evaluated on the whole project ({val_imgs_cnt} images)"
-
-         return classes_str, images_str, train_session
-
-     def get_table(self, widget: Widget.Table) -> dict:
-         res = {}
-
-         columns = ["metrics", "values"]
-         res["content"] = []
-         for metric, value in self._loader.mp.metric_table().items():
-             row = [metric, round(value, 2)]
-             dct = {
-                 "row": row,
-                 "id": metric,
-                 "items": row,
-             }
-             res["content"].append(dct)
-
-         columns_options = [
-             {"customCell": True, "disableSort": True},
-             {"disableSort": True},
-         ]
-
-         res["columns"] = columns
-         res["columnsOptions"] = columns_options
-
-         widget.main_column = columns[0]
-         widget.show_header_controls = False
-         widget.width = "60%"
-         return res
supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py
@@ -1,57 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING
-
- import numpy as np
-
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class PerClassAvgPrecision(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         self.clickable = True
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_class_ap=Widget.Markdown(
-                 title="Average Precision by Class",
-                 is_header=True,
-                 formats=[self._loader.vis_texts.definitions.average_precision],
-             ),
-             chart=Widget.Chart(),
-         )
-
-     def get_figure(self, widget: Widget):  # -> Optional[go.Figure]:
-         import plotly.express as px  # pylint: disable=import-error
-
-         # AP per-class
-         ap_per_class = self._loader.mp.coco_precision[:, :, :, 0, 2].mean(axis=(0, 1))
-         ap_per_class[ap_per_class == -1] = 0  # -1 is a placeholder for no GT
-         labels = dict(r="Average Precision", theta="Class")
-         fig = px.scatter_polar(
-             r=ap_per_class,
-             theta=self._loader.mp.cat_names,
-             # title="Per-class Average Precision (AP)",
-             labels=labels,
-             width=800,
-             height=800,
-             range_r=[0, 1],
-         )
-         fig.update_traces(fill="toself")
-         fig.update_layout(
-             modebar_add=["resetScale"],
-             margin=dict(l=80, r=80, t=0, b=0),
-         )
-         fig.update_traces(
-             hovertemplate=labels["theta"]
-             + ": %{theta}<br>"
-             + labels["r"]
-             + ": %{r:.2f}<br>"
-             + "<extra></extra>"
-         )
-         return fig
supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py
@@ -1,101 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING
-
- import numpy as np
-
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class PRCurve(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_pr_curve=Widget.Markdown(
-                 title="Precision-Recall Curve",
-                 is_header=True,
-                 formats=[self._loader.vis_texts.definitions.f1_score],
-             ),
-             collapse_pr=Widget.Collapse(
-                 schema=Schema(
-                     self._loader.vis_texts,
-                     markdown_trade_offs=Widget.Markdown(
-                         title="About Trade-offs between precision and recall"
-                     ),
-                     markdown_what_is_pr_curve=Widget.Markdown(
-                         title="How the PR curve is built?",
-                         formats=[
-                             self._loader.vis_texts.definitions.confidence_score,
-                             self._loader.vis_texts.definitions.true_positives,
-                             self._loader.vis_texts.definitions.false_positives,
-                         ],
-                     ),
-                 )
-             ),
-             notification_ap=Widget.Notification(
-                 formats_title=[loader.base_metrics()["mAP"].round(2)]
-             ),
-             chart=Widget.Chart(),
-         )
-
-     def get_figure(self, widget: Widget.Chart):  # -> Optional[go.Figure]:
-         import plotly.express as px  # pylint: disable=import-error
-         import plotly.graph_objects as go  # pylint: disable=import-error
-
-         # Precision-Recall curve
-         pr_curve = self._loader.mp.pr_curve().copy()
-         pr_curve[pr_curve == -1] = np.nan  # -1 is a placeholder for no GT
-         pr_curve = np.nanmean(pr_curve, axis=-1)
-         fig = px.line(
-             x=self._loader.mp.recThrs,
-             y=pr_curve,
-             # title="Precision-Recall Curve",
-             labels={"x": "Recall", "y": "Precision"},
-             width=600,
-             height=500,
-         )
-         fig.data[0].name = "Model"
-         fig.data[0].showlegend = True
-         fig.update_traces(fill="tozeroy", line=dict(color="#1f77b4"))
-         fig.add_trace(
-             go.Scatter(
-                 x=self._loader.mp.recThrs,
-                 y=[1] * len(self._loader.mp.recThrs),
-                 name="Perfect",
-                 line=dict(color="orange", dash="dash"),
-                 showlegend=True,
-             )
-         )
-         fig.add_annotation(
-             text=f"mAP = {self._loader.mp.base_metrics()['mAP']:.2f}",
-             xref="paper",
-             yref="paper",
-             x=0.98,
-             y=0.92,
-             showarrow=False,
-             bgcolor="white",
-         )
-         fig.update_traces(hovertemplate="Recall: %{x:.2f}<br>Precision: %{y:.2f}<extra></extra>")
-         fig.update_layout(
-             dragmode=False,
-             modebar=dict(
-                 remove=[
-                     "zoom2d",
-                     "pan2d",
-                     "select2d",
-                     "lasso2d",
-                     "zoomIn2d",
-                     "zoomOut2d",
-                     "autoScale2d",
-                     "resetScale2d",
-                 ]
-             ),
-         )
-         # fig.show()
-         return fig
supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py
@@ -1,46 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING
-
- import pandas as pd
-
- from supervisely.nn.benchmark.visualization.vis_metric_base import MetricVis
- from supervisely.nn.benchmark.visualization.vis_widgets import Schema, Widget
-
- if TYPE_CHECKING:
-     from supervisely.nn.benchmark.visualization.visualizer import Visualizer
-
-
- class PRCurveByClass(MetricVis):
-
-     def __init__(self, loader: Visualizer) -> None:
-         super().__init__(loader)
-         self.clickable = True
-         self.schema = Schema(
-             self._loader.vis_texts,
-             markdown_pr_by_class=Widget.Markdown(title="PR Curve by Class"),
-             chart=Widget.Chart(),
-         )
-
-     def get_figure(self, widget: Widget.Chart):  # -> Optional[go.Figure]:
-         import plotly.express as px  # pylint: disable=import-error
-
-         # Precision-Recall curve per-class
-         df = pd.DataFrame(self._loader.mp.pr_curve(), columns=self._loader.mp.cat_names)
-
-         fig = px.line(
-             df,
-             x=self._loader.mp.recThrs,
-             y=df.columns,
-             # title="Precision-Recall Curve per Class",
-             labels={"x": "Recall", "value": "Precision", "variable": "Category"},
-             color_discrete_sequence=px.colors.qualitative.Prism,
-             width=800,
-             height=600,
-         )
-
-         fig.update_yaxes(range=[0, 1])
-         fig.update_xaxes(range=[0, 1])
-         # fig.show()
-
-         return fig