supervisely-6.73.238-py3-none-any.whl → supervisely-6.73.240-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- supervisely/annotation/annotation.py +2 -2
- supervisely/api/entity_annotation/tag_api.py +11 -4
- supervisely/api/file_api.py +17 -3
- supervisely/nn/__init__.py +1 -0
- supervisely/nn/benchmark/__init__.py +14 -2
- supervisely/nn/benchmark/base_benchmark.py +84 -37
- supervisely/nn/benchmark/base_evaluator.py +120 -0
- supervisely/nn/benchmark/base_visualizer.py +265 -0
- supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -5
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +2 -2
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +39 -16
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +4 -4
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +12 -11
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +6 -6
- supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +3 -3
- supervisely/nn/benchmark/{instance_segmentation_benchmark.py → instance_segmentation/benchmark.py} +9 -3
- supervisely/nn/benchmark/instance_segmentation/evaluator.py +58 -0
- supervisely/nn/benchmark/{visualization/text_templates/instance_segmentation_text.py → instance_segmentation/text_templates.py} +53 -69
- supervisely/nn/benchmark/instance_segmentation/visualizer.py +18 -0
- supervisely/nn/benchmark/object_detection/__init__.py +0 -0
- supervisely/nn/benchmark/object_detection/base_vis_metric.py +51 -0
- supervisely/nn/benchmark/{object_detection_benchmark.py → object_detection/benchmark.py} +4 -2
- supervisely/nn/benchmark/object_detection/evaluation_params.yaml +2 -0
- supervisely/nn/benchmark/{evaluation/object_detection_evaluator.py → object_detection/evaluator.py} +67 -9
- supervisely/nn/benchmark/{evaluation/coco → object_detection}/metric_provider.py +13 -14
- supervisely/nn/benchmark/{visualization/text_templates/object_detection_text.py → object_detection/text_templates.py} +49 -41
- supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py +48 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confidence_distribution.py +20 -24
- supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +119 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/confusion_matrix.py +34 -22
- supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +129 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/f1_score_at_different_iou.py +21 -26
- supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py +137 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py +106 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +136 -0
- supervisely/nn/benchmark/{visualization → object_detection}/vis_metrics/model_predictions.py +53 -49
- supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +188 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +191 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +116 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +106 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py +49 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +72 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +59 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +71 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py +56 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py +110 -0
- supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py +151 -0
- supervisely/nn/benchmark/object_detection/visualizer.py +697 -0
- supervisely/nn/benchmark/semantic_segmentation/__init__.py +9 -0
- supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py +55 -0
- supervisely/nn/benchmark/semantic_segmentation/benchmark.py +32 -0
- supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml +0 -0
- supervisely/nn/benchmark/semantic_segmentation/evaluator.py +162 -0
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +153 -0
- supervisely/nn/benchmark/semantic_segmentation/text_templates.py +130 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/__init__.py +0 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/acknowledgement.py +15 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/classwise_error_analysis.py +57 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +92 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/explore_predictions.py +84 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +101 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/iou_eou.py +45 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/key_metrics.py +60 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/model_predictions.py +107 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +112 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py +48 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py +178 -0
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py +21 -0
- supervisely/nn/benchmark/semantic_segmentation/visualizer.py +304 -0
- supervisely/nn/benchmark/utils/__init__.py +12 -0
- supervisely/nn/benchmark/utils/detection/__init__.py +2 -0
- supervisely/nn/benchmark/{evaluation/coco → utils/detection}/calculate_metrics.py +6 -4
- supervisely/nn/benchmark/utils/detection/metric_provider.py +533 -0
- supervisely/nn/benchmark/{coco_utils → utils/detection}/sly2coco.py +4 -4
- supervisely/nn/benchmark/{coco_utils/utils.py → utils/detection/utlis.py} +11 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/__init__.py +0 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/calculate_metrics.py +35 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py +804 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/loader.py +65 -0
- supervisely/nn/benchmark/utils/semantic_segmentation/utils.py +109 -0
- supervisely/nn/benchmark/visualization/evaluation_result.py +17 -3
- supervisely/nn/benchmark/visualization/vis_click_data.py +1 -1
- supervisely/nn/benchmark/visualization/widgets/__init__.py +3 -0
- supervisely/nn/benchmark/visualization/widgets/chart/chart.py +12 -4
- supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +35 -8
- supervisely/nn/benchmark/visualization/widgets/gallery/template.html +8 -4
- supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +1 -1
- supervisely/nn/benchmark/visualization/widgets/notification/notification.py +11 -7
- supervisely/nn/benchmark/visualization/widgets/radio_group/__init__.py +0 -0
- supervisely/nn/benchmark/visualization/widgets/radio_group/radio_group.py +34 -0
- supervisely/nn/benchmark/visualization/widgets/table/table.py +9 -3
- supervisely/nn/benchmark/visualization/widgets/widget.py +4 -0
- supervisely/project/project.py +18 -6
- {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/METADATA +3 -1
- {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/RECORD +104 -82
- supervisely/nn/benchmark/coco_utils/__init__.py +0 -2
- supervisely/nn/benchmark/evaluation/__init__.py +0 -3
- supervisely/nn/benchmark/evaluation/base_evaluator.py +0 -64
- supervisely/nn/benchmark/evaluation/coco/__init__.py +0 -2
- supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +0 -88
- supervisely/nn/benchmark/utils.py +0 -13
- supervisely/nn/benchmark/visualization/inference_speed/__init__.py +0 -19
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py +0 -161
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py +0 -28
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +0 -141
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py +0 -63
- supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py +0 -23
- supervisely/nn/benchmark/visualization/vis_metric_base.py +0 -337
- supervisely/nn/benchmark/visualization/vis_metrics/__init__.py +0 -67
- supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py +0 -55
- supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py +0 -93
- supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +0 -144
- supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py +0 -115
- supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py +0 -86
- supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py +0 -119
- supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py +0 -148
- supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py +0 -109
- supervisely/nn/benchmark/visualization/vis_metrics/overview.py +0 -189
- supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py +0 -57
- supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py +0 -101
- supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py +0 -46
- supervisely/nn/benchmark/visualization/vis_metrics/precision.py +0 -56
- supervisely/nn/benchmark/visualization/vis_metrics/recall.py +0 -54
- supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py +0 -57
- supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py +0 -88
- supervisely/nn/benchmark/visualization/vis_metrics/what_is.py +0 -23
- supervisely/nn/benchmark/visualization/vis_templates.py +0 -241
- supervisely/nn/benchmark/visualization/vis_widgets.py +0 -128
- supervisely/nn/benchmark/visualization/visualizer.py +0 -729
- /supervisely/nn/benchmark/{visualization/text_templates → instance_segmentation}/__init__.py +0 -0
- /supervisely/nn/benchmark/{evaluation/coco → instance_segmentation}/evaluation_params.yaml +0 -0
- /supervisely/nn/benchmark/{evaluation/coco → utils/detection}/metrics.py +0 -0
- {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/LICENSE +0 -0
- {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/WHEEL +0 -0
- {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.238.dist-info → supervisely-6.73.240.dist-info}/top_level.txt +0 -0
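The headline change is the split of `supervisely/nn/benchmark` into per-task subpackages (`object_detection`, `instance_segmentation`, `semantic_segmentation`) plus shared `utils` and `visualization` widget modules. For code importing the old flat modules, the renames above imply a path change along these lines; this is a hedged sketch, and the exported class name is an assumption not confirmed by this diff:

```python
# Hypothetical migration implied by the renames above;
# the class name ObjectDetectionBenchmark is an assumption.

# 6.73.238 (flat module, removed):
# from supervisely.nn.benchmark.object_detection_benchmark import ObjectDetectionBenchmark

# 6.73.240 (per-task subpackage, added):
from supervisely.nn.benchmark.object_detection.benchmark import ObjectDetectionBenchmark
```

The `+14 -2` change to `supervisely/nn/benchmark/__init__.py` suggests the new subpackages are also re-exported at the package root, but the diff body for that file is not shown here.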
supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py (new file, @@ -0,0 +1,188 @@):

```python
from __future__ import annotations

from typing import Dict  # , Optional

from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
from supervisely.nn.benchmark.visualization.widgets import ChartWidget, MarkdownWidget


class OutcomeCounts(DetectionVisMetric):
    MARKDOWN = "outcome_counts"
    CHART = "outcome_counts"

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.clickable = True

    @property
    def md(self) -> MarkdownWidget:
        text = self.vis_texts.markdown_outcome_counts.format(
            self.vis_texts.definitions.true_positives,
            self.vis_texts.definitions.false_positives,
            self.vis_texts.definitions.false_negatives,
        )
        return MarkdownWidget(self.MARKDOWN, "Outcome Counts", text)

    @property
    def chart(self) -> ChartWidget:
        chart = ChartWidget(self.CHART, self._get_figure())
        chart.set_click_data(
            self.explore_modal_table.id,
            self.get_click_data(),
            chart_click_extra="'getKey': (payload) => `${payload.points[0].data.name}`,",
        )
        # self.explore_modal_table.set_click_data(
        #     self.diff_modal_table.id,
        #     self.get_diff_data(),
        #     get_key="(payload) => `${payload.annotation.image_id}`",
        # )
        return chart

    def _get_figure(self):  # -> go.Figure:
        import plotly.graph_objects as go  # pylint: disable=import-error

        fig = go.Figure()
        fig.add_trace(
            go.Bar(
                x=[self.eval_result.mp.TP_count],
                y=["Outcome"],
                name="TP",
                orientation="h",
                marker=dict(color="#8ACAA1"),
                hovertemplate="TP: %{x} objects<extra></extra>",
            )
        )
        fig.add_trace(
            go.Bar(
                x=[self.eval_result.mp.FN_count],
                y=["Outcome"],
                name="FN",
                orientation="h",
                marker=dict(color="#dd3f3f"),
                hovertemplate="FN: %{x} objects<extra></extra>",
            )
        )
        fig.add_trace(
            go.Bar(
                x=[self.eval_result.mp.FP_count],
                y=["Outcome"],
                name="FP",
                orientation="h",
                marker=dict(color="#F7ADAA"),
                hovertemplate="FP: %{x} objects<extra></extra>",
            )
        )
        fig.update_layout(
            barmode="stack",
            width=600,
            height=300,
        )
        fig.update_xaxes(title_text="Count (objects)")
        fig.update_yaxes(tickangle=-90)

        fig.update_layout(
            dragmode=False,
            modebar=dict(
                remove=[
                    "zoom2d",
                    "pan2d",
                    "select2d",
                    "lasso2d",
                    "zoomIn2d",
                    "zoomOut2d",
                    "autoScale2d",
                    "resetScale2d",
                ]
            ),
        )
        return fig

    def get_click_data(self) -> Dict:
        if not self.clickable:
            return
        res = {}

        res["layoutTemplate"] = [None, None, None]
        res["clickData"] = {}
        for outcome, matches_data in self.eval_result.click_data.outcome_counts.items():
            res["clickData"][outcome] = {}
            res["clickData"][outcome]["imagesIds"] = []

            img_ids = set()
            for match_data in matches_data:
                pairs_data = self.eval_result.matched_pair_data[match_data["gt_img_id"]]
                if outcome == "FN":
                    img_ids.add(pairs_data.diff_image_info.id)
                else:
                    img_ids.add(pairs_data.pred_image_info.id)

            res["clickData"][outcome][
                "title"
            ] = f"{outcome}: {len(matches_data)} object{'s' if len(matches_data) > 1 else ''}"
            res["clickData"][outcome]["imagesIds"] = list(img_ids)
            res["clickData"][outcome]["filters"] = [
                {
                    "type": "tag",
                    "tagId": "confidence",
                    "value": [self.eval_result.mp.f1_optimal_conf, 1],
                },
                {"type": "tag", "tagId": "outcome", "value": outcome},
            ]

        return res

    # def get_diff_data(self) -> Dict:
    #     res = {}

    #     res["layoutTemplate"] = [
    #         {"skipObjectTagsFiltering": True, "columnTitle": "Ground Truth"},
    #         {"skipObjectTagsFiltering": ["outcome"], "columnTitle": "Prediction"},
    #         {"skipObjectTagsFiltering": ["confidence"], "columnTitle": "Difference"},
    #     ]

    #     click_data = res.setdefault("clickData", {})
    #     for outcome, matches_data in self.eval_result.click_data.outcome_counts.items():
    #         filters = [
    #             {
    #                 "type": "tag",
    #                 "tagId": "confidence",
    #                 "value": [self.eval_result.mp.f1_optimal_conf, 1],
    #             },
    #             {"type": "tag", "tagId": "outcome", "value": outcome},
    #         ]
    #         for match_data in matches_data:
    #             pairs_data = self.eval_result.matched_pair_data[match_data["gt_img_id"]]
    #             gt = pairs_data.gt_image_info
    #             pred = pairs_data.pred_image_info
    #             diff = pairs_data.diff_image_info
    #             assert gt.name == pred.name == diff.name
    #             for img_id in [pred.id, diff.id]:
    #                 key = click_data.setdefault(str(img_id), {})
    #                 key["imagesIds"] = [gt.id, pred.id, diff.id]
    #                 key["filters"] = filters
    #                 key["title"] = f"Image: {gt.name}"

    #                 object_bindings = []
    #                 for img in [pred, diff]:
    #                     if img == pred:
    #                         ann_json = pairs_data.pred_annotation.to_json()
    #                     else:
    #                         ann_json = pairs_data.diff_annotation.to_json()
    #                     for obj in ann_json["objects"]:
    #                         for tag in obj["tags"]:
    #                             if tag["name"] == "matched_gt_id":
    #                                 object_bindings.append(
    #                                     [
    #                                         {
    #                                             "id": obj["id"],
    #                                             "annotationKey": img.id,
    #                                         },
    #                                         {
    #                                             "id": int(tag["value"]),
    #                                             "annotationKey": gt.id if img == pred else pred.id,
    #                                         },
    #                                     ]
    #                                 )
    #                     key["objectsBindings"] = object_bindings

    #     return res
```
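For context, the payload that `get_click_data` assembles is keyed by outcome and passed to the explore modal via `chart.set_click_data`. A minimal sketch of its shape, with illustrative IDs and a hypothetical confidence bound standing in for `mp.f1_optimal_conf`:

```python
# Illustrative shape of OutcomeCounts.get_click_data() output; values invented.
example_click_data = {
    "layoutTemplate": [None, None, None],
    "clickData": {
        "TP": {
            "imagesIds": [101, 102],  # pred image ids (diff image ids for "FN")
            "title": "TP: 2 objects",
            "filters": [
                {"type": "tag", "tagId": "confidence", "value": [0.45, 1]},
                {"type": "tag", "tagId": "outcome", "value": "TP"},
            ],
        },
    },
}
```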
supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py (new file, @@ -0,0 +1,191 @@):

```python
from __future__ import annotations

from typing import Dict, Literal

import numpy as np
import pandas as pd

from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
from supervisely.nn.benchmark.visualization.widgets import (
    ChartWidget,
    CollapseWidget,
    ContainerWidget,
    MarkdownWidget,
    RadioGroupWidget,
)


class PerClassOutcomeCounts(DetectionVisMetric):
    MARKDOWN = "per_class_outcome_counts"
    MARKDOWN_2 = "per_class_outcome_counts_2"
    CHART = "per_class_outcome_counts"
    COLLAPSE_TIP = "per_class_outcome_counts_collapse"
    RADIO_GROUP = "per_class_outcome_counts_radio_group"

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.clickable = True
        self._keypair_sep: str = "-"
        self.switchable = True

    @property
    def md(self) -> MarkdownWidget:
        text = self.vis_texts.markdown_class_outcome_counts_1
        text = text.format(
            self.vis_texts.definitions.true_positives,
            self.vis_texts.definitions.false_positives,
            self.vis_texts.definitions.false_negatives,
        )
        return MarkdownWidget(self.MARKDOWN, "Outcome Counts by Class", text)

    @property
    def md_2(self) -> MarkdownWidget:
        text = self.vis_texts.markdown_class_outcome_counts_2
        text = text.format(self.vis_texts.definitions.f1_score)
        return MarkdownWidget(self.MARKDOWN_2, "Outcome Counts by Class", text)

    @property
    def collapse(self) -> CollapseWidget:
        md = MarkdownWidget(
            name=self.COLLAPSE_TIP,
            title="Normalization",
            text=self.vis_texts.markdown_normalization,
        )
        return CollapseWidget([md])

    @property
    def chart(self) -> ContainerWidget:
        return ContainerWidget(
            [self.radio_group(), self._get_chart("normalized"), self._get_chart("absolute")],
            self.CHART,
        )

    def radio_group(self) -> RadioGroupWidget:
        return RadioGroupWidget(
            "Normalization",
            self.RADIO_GROUP,
            ["normalized", "absolute"],
        )

    def _get_chart(self, switch_key: Literal["normalized", "absolute"]) -> ChartWidget:
        chart = ChartWidget(
            self.CHART,
            self._get_figure(switch_key),
            switch_key=switch_key,
            switchable=self.switchable,
            radiogroup_id=self.RADIO_GROUP,
        )
        chart.set_click_data(
            self.explore_modal_table.id,
            self.get_click_data(),
            chart_click_extra="'getKey': (payload) => `${payload.points[0].label}${'-'}${payload.points[0].data.name}`, 'keySeparator': '-',",
        )
        return chart

    def _get_figure(self, switch_key: Literal["normalized", "absolute"]):  # -> go.Figure:
        import plotly.express as px  # pylint: disable=import-error

        # Per-class Counts
        iou_thres = 0

        tp = self.eval_result.mp.true_positives[:, iou_thres]
        fp = self.eval_result.mp.false_positives[:, iou_thres]
        fn = self.eval_result.mp.false_negatives[:, iou_thres]

        # normalize
        support = tp + fn
        with np.errstate(invalid="ignore", divide="ignore"):
            tp_rel = tp / support
            fp_rel = fp / support
            fn_rel = fn / support

        # sort by f1
        sort_scores = 2 * tp / (2 * tp + fp + fn)

        K = len(self.eval_result.mp.cat_names)
        sort_indices = np.argsort(sort_scores)
        cat_names_sorted = [self.eval_result.mp.cat_names[i] for i in sort_indices]
        tp_rel, fn_rel, fp_rel = tp_rel[sort_indices], fn_rel[sort_indices], fp_rel[sort_indices]

        objects_count = np.concatenate([tp[sort_indices], fn[sort_indices], fp[sort_indices]])
        data = {
            "Type": ["TP"] * K + ["FN"] * K + ["FP"] * K,
            "category": cat_names_sorted * 3,
        }
        y_label = ""
        if switch_key == "normalized":
            y_label = "Objects Fraction"
            data["count"] = np.concatenate([tp_rel, fn_rel, fp_rel])
        elif switch_key == "absolute":
            y_label = "Objects Count"
            data["count"] = objects_count

        df = pd.DataFrame(data)

        color_map = {"TP": "#8ACAA1", "FN": "#dd3f3f", "FP": "#F7ADAA"}
        fig = px.bar(
            df,
            x="category",
            y="count",
            color="Type",
            height=500,
            width=1000,
            labels={"count": y_label, "category": "Class"},
            color_discrete_map=color_map,
        )
        xaxis_title = fig.layout.xaxis.title.text
        yaxis_title = fig.layout.yaxis.title.text
        if switch_key == "normalized":

            fig.update_traces(
                hovertemplate="Type=%{fullData.name} <br>"
                + xaxis_title
                + "=%{x}<br>"
                + yaxis_title
                + "=%{y:.2f}<extra></extra>"
            )
        elif switch_key == "absolute":
            fig.update_traces(
                hovertemplate="Type=%{fullData.name} <br>"
                + xaxis_title
                + "=%{x}<br>"
                + yaxis_title
                + "=%{y}<extra></extra>",
            )
        return fig

    def get_click_data(self) -> Dict:
        if not self.clickable:
            return
        res = {}
        res["layoutTemplate"] = [None, None, None]

        res["clickData"] = {}
        for class_name, v1 in self.eval_result.click_data.outcome_counts_by_class.items():
            for outcome, matches_data in v1.items():
                key = class_name + self._keypair_sep + outcome
                res["clickData"][key] = {}
                res["clickData"][key]["imagesIds"] = []
                res["clickData"][key][
                    "title"
                ] = f"Images with objects of class '{class_name}' and outcome '{outcome}'"

                img_ids = set()
                for match_data in matches_data:
                    img_comparison_data = self.eval_result.matched_pair_data[
                        match_data["gt_img_id"]
                    ]
                    if outcome == "FN":
                        img_ids.add(img_comparison_data.diff_image_info.id)
                    else:
                        img_ids.add(img_comparison_data.pred_image_info.id)
                res["clickData"][key]["imagesIds"] = list(img_ids)
                res["clickData"][key]["filters"] = [
                    {
                        "type": "tag",
                        "tagId": "confidence",
                        "value": [self.eval_result.mp.f1_optimal_conf, 1],
                    },
                    {"type": "tag", "tagId": "outcome", "value": outcome},
                ]
        return res
```
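The normalization in `_get_figure` divides each class's TP/FP/FN counts by that class's support (TP + FN, its ground-truth object count), and orders the x-axis by per-class F1 so the weakest classes appear first. A standalone sketch of that arithmetic, with invented counts:

```python
import numpy as np

# Illustrative per-class counts for three classes; not taken from the diff.
tp = np.array([80, 10, 5])
fp = np.array([20, 5, 1])
fn = np.array([10, 30, 0])

support = tp + fn  # ground-truth objects per class
with np.errstate(invalid="ignore", divide="ignore"):
    tp_rel, fp_rel, fn_rel = tp / support, fp / support, fn / support

f1 = 2 * tp / (2 * tp + fp + fn)  # sort key used for the bar order
order = np.argsort(f1)            # ascending: worst-F1 classes come first
```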
supervisely/nn/benchmark/object_detection/vis_metrics/overview.py (new file, @@ -0,0 +1,116 @@):

```python
import datetime
from typing import List

from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
from supervisely.nn.benchmark.visualization.widgets import MarkdownWidget


class Overview(DetectionVisMetric):

    def get_header(self, user_login: str) -> MarkdownWidget:
        current_date = datetime.datetime.now().strftime("%d %B %Y, %H:%M")
        header_text = self.vis_texts.markdown_header.format(
            self.eval_result.name, user_login, current_date
        )
        header = MarkdownWidget("markdown_header", "Header", text=header_text)
        return header

    @property
    def md(self) -> List[MarkdownWidget]:
        url = self.eval_result.inference_info.get("checkpoint_url")
        link_text = self.eval_result.inference_info.get("custom_checkpoint_path")
        if link_text is None:
            link_text = url
        link_text = link_text.replace("_", "\_")

        model_name = self.eval_result.inference_info.get("model_name") or "Custom"
        checkpoint_name = self.eval_result.inference_info.get("deploy_params", {}).get(
            "checkpoint_name", ""
        )

        # Note about validation dataset
        classes_str, note_about_images, starter_app_info = self._get_overview_info()

        # link to scroll to the optimal confidence section
        opt_conf_url = self.vis_texts.docs_url + "#f1-optimal-confidence-threshold"

        formats = [
            model_name.replace("_", "\_"),
            checkpoint_name.replace("_", "\_"),
            self.eval_result.inference_info.get("architecture"),
            self.eval_result.inference_info.get("task_type"),
            self.eval_result.inference_info.get("runtime"),
            url,
            link_text,
            self.eval_result.gt_project_info.id,
            self.eval_result.gt_project_info.name,
            classes_str,
            note_about_images,
            starter_app_info,
            self.eval_result.mp.iou_threshold,
            round(self.eval_result.mp.f1_optimal_conf, 4),
            opt_conf_url,
            self.vis_texts.docs_url,
        ]

        md = MarkdownWidget(
            "markdown_overview",
            "Overview",
            text=self.vis_texts.markdown_overview.format(*formats),
        )
        md.is_info_block = True
        md.width_fit_content = True
        return md

    def _get_overview_info(self):
        classes_cnt = len(self.eval_result.classes_whitelist)
        classes_str = "classes" if classes_cnt > 1 else "class"
        classes_str = f"{classes_cnt} {classes_str}"

        evaluator_session, train_session, images_str = None, None, ""
        gt_project_id = self.eval_result.gt_project_info.id
        gt_dataset_ids = self.eval_result.gt_dataset_ids
        gt_images_cnt = self.eval_result.val_images_cnt
        train_info = self.eval_result.train_info
        evaluator_app_info = self.eval_result.evaluator_app_info
        total_imgs_cnt = self.eval_result.gt_project_info.items_count
        if gt_images_cnt is not None:
            val_imgs_cnt = gt_images_cnt
        elif gt_dataset_ids is not None:
            datasets = self.eval_result.gt_dataset_infos
            val_imgs_cnt = sum(ds.items_count for ds in datasets)
        else:
            val_imgs_cnt = self.eval_result.pred_project_info.items_count

        if train_info:
            train_task_id = train_info.get("app_session_id")
            if train_task_id:
                app_id = self.eval_result.task_info["meta"]["app"]["id"]
                train_session = f'- **Training dashboard**: <a href="/apps/{app_id}/sessions/{train_task_id}" target="_blank">open</a>'

            train_imgs_cnt = train_info.get("images_count")
            images_str = f", {train_imgs_cnt} images in train, {val_imgs_cnt} images in validation"

        if gt_images_cnt is not None:
            images_str += (
                f", total {total_imgs_cnt} images. Evaluated using subset - {gt_images_cnt} images"
            )
        elif gt_dataset_ids is not None:
            links = [
                f'<a href="/projects/{gt_project_id}/datasets/{ds.id}" target="_blank">{ds.name}</a>'
                for ds in datasets
            ]
            images_str += f", total {total_imgs_cnt} images. Evaluated on the dataset{'s' if len(links) > 1 else ''}: {', '.join(links)}"
        else:
            images_str += f", total {total_imgs_cnt} images. Evaluated on the whole project ({val_imgs_cnt} images)"

        if evaluator_app_info:
            evaluator_task_id = evaluator_app_info.get("id")
            evaluator_app_id = evaluator_app_info.get("meta", {}).get("app", {}).get("id")
            evaluator_app_name = evaluator_app_info.get("meta", {}).get("app", {}).get("name")
            if evaluator_task_id and evaluator_app_id and evaluator_app_name:
                evaluator_session = f'- **Evaluator app session**: <a href="/apps/{evaluator_app_id}/sessions/{evaluator_task_id}" target="_blank">open</a>'

        starter_app_info = train_session or evaluator_session or ""

        return classes_str, images_str, starter_app_info
```
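The images note assembled by `_get_overview_info` follows three mutually exclusive branches. A hedged sketch of the strings each branch yields, with invented counts:

```python
# Illustrative outputs of the three images_str branches above (numbers invented):
#
# val_images_cnt set (subset evaluation):
#   ", total 1000 images. Evaluated using subset - 200 images"
#
# gt_dataset_ids set (specific datasets):
#   ", total 1000 images. Evaluated on the datasets: <a ...>val1</a>, <a ...>val2</a>"
#
# neither set (whole project):
#   ", total 1000 images. Evaluated on the whole project (1000 images)"
```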
supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py (new file, @@ -0,0 +1,106 @@):

```python
from __future__ import annotations

import numpy as np

from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
from supervisely.nn.benchmark.visualization.widgets import (
    ChartWidget,
    CollapseWidget,
    MarkdownWidget,
    NotificationWidget,
)


class PRCurve(DetectionVisMetric):
    MARKDOWN = "pr_curve"
    NOTIFICATION = "pr_curve"
    COLLAPSE = "pr_curve"
    CHART = "pr_curve"

    @property
    def md(self) -> MarkdownWidget:
        text = self.vis_texts.markdown_pr_curve.format(self.vis_texts.definitions.f1_score)
        return MarkdownWidget(self.MARKDOWN, "Precision-Recall Curve", text)

    @property
    def notification(self) -> NotificationWidget:
        title, _ = self.vis_texts.notification_ap.values()
        return NotificationWidget(
            self.NOTIFICATION,
            title.format(self.eval_result.mp.base_metrics()["mAP"].round(2)),
        )

    @property
    def chart(self) -> ChartWidget:
        return ChartWidget(self.CHART, self._get_figure())

    @property
    def collapse(self) -> CollapseWidget:
        md1 = MarkdownWidget(
            "pr_curve",
            "About Trade-offs between precision and recall",
            self.vis_texts.markdown_trade_offs,
        )
        md2 = MarkdownWidget(
            "what_is_pr_curve",
            "How the PR curve is built?",
            self.vis_texts.markdown_what_is_pr_curve.format(
                self.vis_texts.definitions.confidence_score,
                self.vis_texts.definitions.true_positives,
                self.vis_texts.definitions.false_positives,
            ),
        )
        return CollapseWidget([md1, md2])

    def _get_figure(self):  # -> go.Figure:
        import plotly.express as px  # pylint: disable=import-error
        import plotly.graph_objects as go  # pylint: disable=import-error

        pr_curve = self.eval_result.mp.pr_curve().copy()
        pr_curve[pr_curve == -1] = np.nan  # -1 is a placeholder for no GT
        pr_curve = np.nanmean(pr_curve, axis=-1)
        fig = px.line(
            x=self.eval_result.mp.recThrs,
            y=pr_curve,
            labels={"x": "Recall", "y": "Precision"},
            width=600,
            height=500,
        )
        fig.data[0].name = "Model"
        fig.data[0].showlegend = True
        fig.update_traces(fill="tozeroy", line=dict(color="#1f77b4"))
        fig.add_trace(
            go.Scatter(
                x=self.eval_result.mp.recThrs,
                y=[1] * len(self.eval_result.mp.recThrs),
                name="Perfect",
                line=dict(color="orange", dash="dash"),
                showlegend=True,
            )
        )
        fig.add_annotation(
            text=f"mAP = {self.eval_result.mp.base_metrics()['mAP']:.2f}",
            xref="paper",
            yref="paper",
            x=0.98,
            y=0.92,
            showarrow=False,
            bgcolor="white",
        )
        fig.update_traces(hovertemplate="Recall: %{x:.2f}<br>Precision: %{y:.2f}<extra></extra>")
        fig.update_layout(
            dragmode=False,
            modebar=dict(
                remove=[
                    "zoom2d",
                    "pan2d",
                    "select2d",
                    "lasso2d",
                    "zoomIn2d",
                    "zoomOut2d",
                    "autoScale2d",
                    "resetScale2d",
                ]
            ),
        )
        return fig
```
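The averaging step in `_get_figure` treats -1 entries in the per-class precision matrix as "no ground truth" and excludes them from the class mean. A standalone sketch of that reduction, assuming the shape is (recall thresholds, classes) as the `axis=-1` reduction implies:

```python
import numpy as np

# Invented precision matrix: rows are recall thresholds, columns are classes;
# -1 marks a class with no GT, mirroring the placeholder handling above.
pr = np.array([
    [1.00, 0.90, -1.0],
    [0.80, 0.70, -1.0],
    [0.60, 0.50, -1.0],
])
pr[pr == -1] = np.nan
mean_curve = np.nanmean(pr, axis=-1)  # average over classes, ignoring NaN
# mean_curve -> [0.95, 0.75, 0.55]; the third class never drags the mean down
```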
supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py (new file, @@ -0,0 +1,49 @@):

```python
from __future__ import annotations

import pandas as pd

from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
from supervisely.nn.benchmark.visualization.widgets import ChartWidget, MarkdownWidget


class PRCurveByClass(DetectionVisMetric):
    MARKDOWN = "pr_curve_by_class"
    CHART = "pr_curve_by_class"

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.clickable = True

    @property
    def md(self) -> MarkdownWidget:
        text = self.vis_texts.markdown_pr_by_class
        return MarkdownWidget(self.MARKDOWN, "PR Curve by Class", text)

    @property
    def chart(self) -> ChartWidget:
        chart = ChartWidget(self.CHART, self._get_figure())
        chart.set_click_data(
            self.explore_modal_table.id,
            self.get_click_data(),
            chart_click_extra="'getKey': (payload) => `${payload.points[0].data.legendgroup}`,",
        )
        return chart

    def _get_figure(self):  # -> go.Figure:
        import plotly.express as px  # pylint: disable=import-error

        df = pd.DataFrame(self.eval_result.mp.pr_curve(), columns=self.eval_result.mp.cat_names)

        fig = px.line(
            df,
            x=self.eval_result.mp.recThrs,
            y=df.columns,
            labels={"x": "Recall", "value": "Precision", "variable": "Category"},
            color_discrete_sequence=px.colors.qualitative.Prism,
            width=800,
            height=600,
        )

        fig.update_yaxes(range=[0, 1])
        fig.update_xaxes(range=[0, 1])
        return fig
```
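The class wraps the per-class precision matrix in a wide-form DataFrame, one column per class, and lets `px.line` draw one trace per column against an external recall axis. A minimal standalone rendering of the same call pattern, with invented class names and curves:

```python
import numpy as np
import pandas as pd
import plotly.express as px

# Wide-form input as in PRCurveByClass._get_figure(): one column per class.
rec_thrs = np.linspace(0, 1, 101)
df = pd.DataFrame({
    "cat": np.clip(1.0 - rec_thrs, 0, 1),   # invented precision curves
    "dog": np.clip(0.9 - rec_thrs, 0, 1),
})
fig = px.line(
    df,
    x=rec_thrs,          # external x array, same length as the columns
    y=df.columns,        # one trace per class column
    labels={"x": "Recall", "value": "Precision", "variable": "Category"},
)
```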