supervisely 6.73.213__py3-none-any.whl → 6.73.215__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of supervisely has been flagged as potentially problematic.
Files changed (51)
  1. supervisely/app/widgets/report_thumbnail/report_thumbnail.py +17 -5
  2. supervisely/app/widgets/team_files_selector/team_files_selector.py +3 -0
  3. supervisely/io/network_exceptions.py +89 -32
  4. supervisely/nn/benchmark/comparison/__init__.py +0 -0
  5. supervisely/nn/benchmark/comparison/detection_visualization/__init__.py +0 -0
  6. supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +437 -0
  7. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/__init__.py +27 -0
  8. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py +125 -0
  9. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +224 -0
  10. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predicttions.py +112 -0
  11. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py +161 -0
  12. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +336 -0
  13. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +249 -0
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +142 -0
  15. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +300 -0
  16. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py +308 -0
  17. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/vis_metric.py +19 -0
  18. supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py +298 -0
  19. supervisely/nn/benchmark/comparison/model_comparison.py +84 -0
  20. supervisely/nn/benchmark/evaluation/coco/metric_provider.py +9 -7
  21. supervisely/nn/benchmark/visualization/evaluation_result.py +266 -0
  22. supervisely/nn/benchmark/visualization/renderer.py +100 -0
  23. supervisely/nn/benchmark/visualization/report_template.html +46 -0
  24. supervisely/nn/benchmark/visualization/visualizer.py +1 -1
  25. supervisely/nn/benchmark/visualization/widgets/__init__.py +17 -0
  26. supervisely/nn/benchmark/visualization/widgets/chart/__init__.py +0 -0
  27. supervisely/nn/benchmark/visualization/widgets/chart/chart.py +72 -0
  28. supervisely/nn/benchmark/visualization/widgets/chart/template.html +16 -0
  29. supervisely/nn/benchmark/visualization/widgets/collapse/__init__.py +0 -0
  30. supervisely/nn/benchmark/visualization/widgets/collapse/collapse.py +33 -0
  31. supervisely/nn/benchmark/visualization/widgets/container/__init__.py +0 -0
  32. supervisely/nn/benchmark/visualization/widgets/container/container.py +54 -0
  33. supervisely/nn/benchmark/visualization/widgets/gallery/__init__.py +0 -0
  34. supervisely/nn/benchmark/visualization/widgets/gallery/gallery.py +125 -0
  35. supervisely/nn/benchmark/visualization/widgets/gallery/template.html +49 -0
  36. supervisely/nn/benchmark/visualization/widgets/markdown/__init__.py +0 -0
  37. supervisely/nn/benchmark/visualization/widgets/markdown/markdown.py +53 -0
  38. supervisely/nn/benchmark/visualization/widgets/notification/__init__.py +0 -0
  39. supervisely/nn/benchmark/visualization/widgets/notification/notification.py +38 -0
  40. supervisely/nn/benchmark/visualization/widgets/sidebar/__init__.py +0 -0
  41. supervisely/nn/benchmark/visualization/widgets/sidebar/sidebar.py +67 -0
  42. supervisely/nn/benchmark/visualization/widgets/table/__init__.py +0 -0
  43. supervisely/nn/benchmark/visualization/widgets/table/table.py +116 -0
  44. supervisely/nn/benchmark/visualization/widgets/widget.py +22 -0
  45. supervisely/nn/inference/cache.py +8 -5
  46. {supervisely-6.73.213.dist-info → supervisely-6.73.215.dist-info}/METADATA +5 -5
  47. {supervisely-6.73.213.dist-info → supervisely-6.73.215.dist-info}/RECORD +51 -12
  48. {supervisely-6.73.213.dist-info → supervisely-6.73.215.dist-info}/LICENSE +0 -0
  49. {supervisely-6.73.213.dist-info → supervisely-6.73.215.dist-info}/WHEEL +0 -0
  50. {supervisely-6.73.213.dist-info → supervisely-6.73.215.dist-info}/entry_points.txt +0 -0
  51. {supervisely-6.73.213.dist-info → supervisely-6.73.215.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,437 @@
1
+ from types import SimpleNamespace
2
+
3
+ docs_url = "https://docs.supervisely.com/neural-networks/model-evaluation-benchmark/"
4
+
5
+ definitions = SimpleNamespace(
6
+ true_positives="True Positives (TP): These are correctly detected objects. For a prediction to be counted as a true positive, the predicted bounding box must align with a ground truth bounding box with an Intersection over Union (IoU) of 0.5 or more, and the object must be correctly classified",
7
+ false_positives="False Positives (FP): These are incorrect detections made by the model. They occur when the model predicts a bounding box that either does not overlap sufficiently with any ground truth box (IoU less than 0.5) or incorrectly classifies the object within the bounding box. For example, the model detects a car in the image, but there is no car in the ground truth.",
8
+ false_negatives="False Negatives (FN): These are the missed detections. They occur when an actual object in the ground truth is not detected by the model, meaning there is no predicted bounding box with an IoU of 0.5 or more for this object. For example, there is a car in the image, but the model fails to detect it.",
9
+ confidence_threshold="Confidence threshold is a hyperparameter used to filter out predictions that the model is not confident in. By setting a higher confidence threshold, you ensure that only the most certain predictions are considered, thereby reducing the number of false predictions. This helps to control the trade-off between precision and recall in the model's output.",
10
+ confidence_score="The confidence score, also known as probability score, quantifies how confident the model is that its prediction is correct. It is a numerical value between 0 and 1, generated by the model for each bounding box, that represents the likelihood that a predicted bounding box contains an object of a particular class.",
11
+ f1_score="F1-score is a useful metric that combines both precision and recall into a single measure. As the harmonic mean of precision and recall, the f1-score provides a balanced representation of both metrics in one value. F1-score ranges from 0 to 1, with a higher score indicating better model performance. It is calculated as 2 * (precision * recall) / (precision + recall).",
12
+ average_precision="Average precision (AP) is computed as the area under the precision-recall curve. It measures the precision of the model at different recall levels and provides a single number that summarizes the trade-off between precision and recall for a given class.",
13
+ about_pr_tradeoffs="A system with high recall but low precision returns many results, but most of its predictions are incorrect or redundant (false positives). A system with high precision but low recall is just the opposite, returning very few results, but most of its predictions are correct. An ideal system with high precision and high recall will return many results, with all results predicted correctly.",
14
+ iou_score="IoU score is a measure of overlap between predicted bounding box and ground truth bounding box. A higher IoU score indicates better alignment between the predicted and ground truth bounding boxes.",
15
+ iou_threshold="The IoU threshold is a predefined value (set to 0.5 in many benchmarks) that determines the minimum acceptable IoU score for a predicted bounding box to be considered a correct prediction. When the IoU of a predicted box and actual box is higher than this IoU threshold, the prediction is considered correct. Some metrics will evaluate the model with different IoU thresholds to provide more insights about the model's performance.",
16
+ )
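The precision, recall, and F1 definitions above reduce to simple ratios of TP, FP, and FN counts. A minimal standalone sketch using hypothetical counts (not taken from this diff):

def precision_recall_f1(tp: int, fp: int, fn: int):
    # Precision: correct detections over all detections made by the model.
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    # Recall: correct detections over all ground-truth instances.
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    # F1: harmonic mean of precision and recall, as defined above.
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1

# Hypothetical counts: 80 TP, 20 FP, 40 FN.
p, r, f1 = precision_recall_f1(80, 20, 40)
print(f"precision={p:.2f} recall={r:.2f} f1={f1:.2f}")  # precision=0.80 recall=0.67 f1=0.73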
17
+
18
+ # <i class="zmdi zmdi-check-circle" style="color: #13ce66; margin-right: 5px"></i>
19
+ clickable_label = """
20
+ > <span style="color: #5a6772">
21
+ > Click on the chart to explore corresponding images.
22
+ > </span>
23
+ """
24
+
25
+ markdown_header = """
26
+ <h1>{}</h1>
27
+
28
+ <div class="model-info-block">
29
+ <div>Created by <b>{}</b></div>
30
+ <div><i class="zmdi zmdi-calendar-alt"></i><span>{}</span></div>
31
+ </div>
32
+ """
33
+
34
+ markdown_common_overview = """
35
+ - **Models**: {}
36
+ - **Evaluation Dataset**: <a href="/projects/{}/datasets" target="_blank">{}</a>
37
+ - **Task type**: {}
38
+ """
39
+
40
+ markdown_overview_info = """
41
+ <h3>{}</h3>
42
+ - **Model**: {}
43
+ - **Checkpoint**: {}
44
+ - **Architecture**: {}
45
+ - **Runtime**: {}
46
+ - **Checkpoint file**: <a class="checkpoint-url" href="{}" target="_blank">{}</a>
47
+ - **Evaluation Report**: <a href="{}" target="_blank">View Report</a>
48
+
49
+ """
50
+
51
+ markdown_overview = """
52
+ - **Model**: {}
53
+ - **Checkpoint**: {}
54
+ - **Architecture**: {}
55
+ - **Task type**: {}
56
+ - **Runtime**: {}
57
+ - **Checkpoint file**: <a class="checkpoint-url" href="{}" target="_blank">{}</a>
58
+ - **Ground Truth project**: <a href="/projects/{}/datasets" target="_blank">{}</a>, {}{}
59
+ {}
60
+
61
+ Learn more about Model Benchmark, implementation details, and how to use the charts in our <a href="{}" target="_blank">Technical Report</a>.
62
+ """
63
+ # - **Model**: {}
64
+ # - **Training dataset (?)**: COCO 2017 train
65
+ # - **Model classes (?)**: (80): a, b, c, … (collapse)
66
+ # - **Model weights (?)**: [/path/to/yolov8l.pt]()
67
+ # - **License (?)**: AGPL-3.0
68
+
69
+ markdown_key_metrics = """## Key Metrics
70
+
71
+ Here, we comprehensively assess the model's performance by presenting a broad set of metrics, including mAP (mean Average Precision), Precision, Recall, IoU (Intersection over Union), Classification Accuracy, Calibration Score, and Inference Speed.
72
+
73
+ - **Mean Average Precision (mAP)**: A comprehensive metric of detection performance. mAP calculates the <abbr title="{}">average precision</abbr> across all classes at different levels of <abbr title="{}">IoU thresholds</abbr> and precision-recall trade-offs. In other words, it evaluates the performance of a model by considering its ability to detect and localize objects accurately across multiple IoU thresholds and object categories.
74
+ - **Precision**: Precision indicates how often the model's predictions are actually correct when it predicts an object. It is calculated as the ratio of correct detections to the total number of detections made by the model.
75
+ - **Recall**: Recall measures the model's ability to find all relevant objects in a dataset. It is calculated as the ratio of correct detections to the total number of instances in the dataset.
76
+ - **Intersection over Union (IoU)**: IoU measures how closely predicted bounding boxes match the actual (ground truth) bounding boxes. It is calculated as the area of overlap between the predicted bounding box and the ground truth bounding box, divided by the area of union of these bounding boxes.
77
+ - **Classification Accuracy**: We additionally measure the classification accuracy of an object detection model. This metric represents the percentage of correctly labeled instances among all correctly localized bounding boxes (where the IoU for each box is greater than 0.5, regardless of class).
78
+ - **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
79
+ - **Inference Speed**: The number of frames per second (FPS) the model can process, measured with a batch size of 1. Inference speed is important in applications where real-time object detection is required. Additionally, slower models consume more GPU resources, so their inference cost is higher.
80
+ """
81
+
82
+ markdown_explorer = """## Explore Predictions
83
+ In this section you can visually assess the model performance through examples. This helps users better understand model capabilities and limitations, giving an intuitive grasp of prediction quality in different scenarios.
84
+
85
+ > Click on the image to view the **Ground Truth**, **Prediction**, and **Difference** annotations side-by-side.
86
+
87
+ > Filtering options allow you to adjust the confidence threshold (only for predictions) and the model's false outcomes (only for differences). Differences are calculated only for the optimal confidence threshold, allowing you to focus on the most accurate predictions made by the model.
88
+ """
89
+
90
+ markdown_explore_difference = """## Explore Predictions
91
+
92
+ In this section, you can explore predictions made by different models side-by-side. This helps you to understand the differences in predictions made by each model, and to identify which model performs better in different scenarios.
93
+
94
+
95
+ > Click on the image to view the **Ground Truth** and **Prediction** annotations side-by-side.
96
+ """
97
+ ### Difference in Predictions
98
+
99
+ markdown_explore_same_errors = """
100
+ ### Same Errors
101
+
102
+ This section helps you to identify samples where all models made the same errors. It is useful for understanding the limitations of the models and the common challenges they face.
103
+
104
+ > Click on the image to view the **Ground Truth** and **Prediction** annotations side-by-side.
105
+ """
106
+
107
+ markdown_predictions_gallery = """
108
+
109
+ """
110
+ # You can choose one of the sorting method:
111
+
112
+ # - **Auto**: The algorithm is trying to gather a diverse set of images that illustrate the model's performance across various scenarios.
113
+ # - **Least accurate**: Displays images where the model made more errors.
114
+ # - **Most accurate**: Displays images where the model made fewer or no errors.
115
+ # - **Dataset order**: Displays images in the original order of the dataset.
116
+ # """
117
+
118
+ markdown_predictions_table = """### Prediction details for every image
119
+
120
+ The table helps you find samples with specific cases of interest. You can sort by parameters such as the number of predictions, or by a specific metric, e.g., recall, then click on a row to view this image and its predictions.
121
+
122
+ **Example**: you can sort by **FN** (False Negatives) in descending order to identify samples where the model failed to detect many objects.
123
+
124
+
125
+ > Click on the row to view the image with **Ground Truth**, **Prediction**, or the **Difference** annotations.
126
+ """
127
+
128
+ markdown_what_is = """
129
+ """
130
+
131
+ markdown_experts = """
132
+ """
133
+
134
+ markdown_how_to_use = """
135
+ """
136
+
137
+ markdown_outcome_counts = (
138
+ """## Outcome Counts
139
+
140
+ This chart is used to evaluate the overall model performance by breaking down all predictions into <abbr title="{}">True Positives</abbr> (TP), <abbr title="{}">False Positives</abbr> (FP), and <abbr title="{}">False Negatives</abbr> (FN). This helps to visually assess the type of errors the model often encounters.
141
+
142
+ """
143
+ + clickable_label
144
+ )
145
+
146
+ markdown_outcome_counts_diff = (
147
+ """### Outcome Counts Differences
148
+
149
+ This chart compares the outcomes between different models. It helps identify where the models agree and disagree in their predictions. The 'Common' bar represents cases where the models made identical predictions, regardless of whether those predictions were correct or incorrect. The individual model bars show instances where that particular model made unique predictions that differed from other models.
150
+
151
+ """
152
+ + clickable_label
153
+ )
154
+
155
+ markdown_precision_per_class_title = """### Precision by Class"""
156
+
157
+ markdown_recall_per_class_title = """### Recall by Class"""
158
+
159
+ markdown_f1_per_class_title = """### F1-score by Class"""
160
+
161
+ markdown_R = """## Recall
162
+
163
+ This section measures the ability of the model to detect **all relevant instances in the dataset**. In other words, it answers the question: “Of all instances in the dataset, how many of them did the model manage to find?”
164
+
165
+ To measure this, we calculate **Recall**. Recall accounts for errors where the model does not detect an object that is actually present in the dataset and should be detected. Recall is calculated as the proportion of correct predictions (true positives) over all instances in the dataset (true positives + false negatives).
166
+ """
167
+
168
+ notification_recall = {
169
+ "title": "Recall = {}",
170
+ "description": "The model correctly found <b>{} of {}</b> total instances in the dataset.",
171
+ }
172
+
173
+ markdown_R_perclass = (
174
+ """### Per-class Recall
175
+
176
+ This chart further analyzes Recall, breaking it down by each class separately.
177
+
178
+ Since the overall recall is calculated as an average across all classes, we provide a chart showing the recall for each individual class. This illustrates how much each class contributes to the overall recall.
179
+
180
+ _Bars in the chart are sorted by <abbr title="{}">F1-score</abbr> to keep a unified order of classes between different charts._
181
+
182
+ """
183
+ + clickable_label
184
+ )
185
+
186
+
187
+ markdown_P = """## Precision
188
+
189
+ This section measures the accuracy of all predictions made by the model. In other words, it answers the question: “Of all predictions made by the model, how many of them are actually correct?”.
190
+
191
+ To measure this, we calculate **Precision**. Precision accounts for errors where the model predicts an object (bounding box), but the image contains no object of the predicted class at that location. Precision is calculated as the proportion of correct predictions (true positives) over all of the model’s predictions (true positives + false positives).
192
+ """
193
+
194
+ notification_precision = {
195
+ "title": "Precision = {}",
196
+ "description": "The model correctly predicted <b>{} of {}</b> predictions made by the model in total.",
197
+ }
198
+
199
+ markdown_P_perclass = (
200
+ """### Per-class Precision
201
+
202
+ This chart further analyzes Precision, breaking it down by each class separately.
203
+
204
+ Since the overall precision is computed as an average across all classes, we provide a chart showing the precision for each class individually. This illustrates how much each class contributes to the overall precision.
205
+
206
+ _Bars in the chart are sorted by <abbr title="{}">F1-score</abbr> to keep a unified order of classes between different charts._
207
+
208
+ """
209
+ + clickable_label
210
+ )
211
+
212
+
213
+ markdown_PR = (
214
+ """## Recall vs. Precision
215
+
216
+ This section compares Precision and Recall in one graph, identifying **imbalance** between these two.
217
+
218
+ _Bars in the chart are sorted by <abbr title="{}">F1-score</abbr> to keep a unified order of classes between different charts._
219
+
220
+ """
221
+ + clickable_label
222
+ )
223
+
224
+ markdown_PRF1 = """## Precision, Recall, F1-score
225
+
226
+ This section compares Precision, Recall, and F1-score metrics. The first graph identifies imbalance between these metrics. On the next charts, you can see each metric separately, broken down by class.
227
+
228
+ _Bars in the chart are sorted by <abbr title="{}">F1-score</abbr> to keep a unified order of classes between different charts._
229
+ """
230
+
231
+ markdown_pr_curve = """## mAP & Precision-Recall Curve
232
+
233
+ Precision-Recall curve is an overall performance indicator. It helps to visually assess both precision and recall for all predictions made by the model on the whole dataset. This gives you an understanding of how precision changes as you attempt to increase recall, providing a view of **trade-offs between precision and recall** <abbr title="{}">(?)</abbr>. Ideally, a high-quality model will maintain strong precision as recall increases. This means that as you move from left to right on the curve, there should not be a significant drop in precision. Such a model is capable of finding many relevant instances, maintaining a high level of precision.
234
+ """
235
+
236
+ markdown_trade_offs = """- A system with high recall but low precision generates many results, but most of its predictions are incorrect or redundant (false positives).
237
+
238
+ - Conversely, a system with high precision but low recall produces very few results, but most of its predictions are accurate.
239
+
240
+ - The ideal system achieves both high precision and high recall, meaning it returns many results with a high accuracy rate.
241
+ """
242
+
243
+ markdown_what_is_pr_curve = """1. **Sort predictions**: Arrange all bounding box predictions by their <abbr title="{}">confidence scores</abbr> in descending order.
244
+
245
+ 2. **Classify outcomes**: For each prediction, determine if it is a <abbr title="{}">true positive</abbr> (TP) or a <abbr title="{}">false positive</abbr> (FP) and record these classifications in a table.
246
+
247
+ 3. **Calculate cumulative metrics**: As you move through each prediction, calculate the cumulative precision and recall. Add these values to the table.
248
+
249
+ 4. **Plot points**: Each row in the table now represents a point on a graph, with cumulative recall on the x-axis and cumulative precision on the y-axis. Initially, this creates a zig-zag line because of variations between predictions.
250
+
251
+ 5. **Smooth the curve**: The true PR curve is derived by plotting only the maximum precision value for each recall level across all thresholds. This means you connect only the highest points of precision for each segment of recall, smoothing out the zig-zags and forming a curve that typically slopes downward as recall increases.
252
+ """
253
+
254
+
255
+ notification_ap = {
256
+ "title": "mAP = {}",
257
+ "description": "",
258
+ }
259
+
260
+ markdown_pr_by_class = (
261
+ """### Precision-Recall Curve by Class
262
+
263
+ In this plot, you can evaluate the PR curve for each class individually.
264
+
265
+ """
266
+ + clickable_label
267
+ )
268
+
269
+ markdown_confusion_matrix = (
270
+ """## Confusion Matrix
271
+
272
+ Confusion matrix helps to find the number of confusions between different classes made by the model.
273
+ Each row of the matrix represents the instances in a ground truth class, while each column represents the instances in a predicted class.
274
+ The diagonal elements represent the number of correct predictions for each class (True Positives), and the off-diagonal elements show misclassifications.
275
+
276
+ """
277
+ + clickable_label
278
+ )
279
+
280
+
281
+ markdown_frequently_confused = (
282
+ """### Frequently Confused Classes
283
+
284
+ This chart displays the most frequently confused pairs of classes. In general, it shows which classes the model perceives as visually very similar.
285
+
286
+ The chart calculates the **probability of confusion** between different pairs of classes. For instance, if the probability of confusion for the pair “{} - {}” is {}, this means that when the model predicts either “{}” or “{}”, there is a {}% chance that the model might mistakenly predict one instead of the other.
287
+
288
+ The measure is class-symmetric, meaning that the probability of confusing a {} with a {} is equal to the probability of confusing a {} with a {}.
289
+
290
+ """
291
+ + clickable_label
292
+ )
293
+
294
+
295
+ markdown_localization_accuracy = """## Localization Accuracy (IoU)
296
+
297
+ This section measures how closely predicted bounding boxes generated by the model are aligned with the actual (ground truth) bounding boxes.
298
+ """
299
+
300
+ markdown_iou_calculation = """<img src='https://github.com/dataset-ninja/model-benchmark-template/assets/78355358/8d7c63d0-2f3b-4f3f-9fd8-c6383a4bfba4' alt='alt text' width='300' />
301
+
302
+ To measure it, we calculate the <b>Intersection over Union (IoU)</b>. Intuitively, the higher the IoU, the closer two bounding boxes are. IoU is calculated by dividing the <b>area of overlap</b> between the predicted bounding box and the ground truth bounding box by the <b>area of union</b> of these two boxes.
303
+ """
304
+
305
+ markdown_iou_distribution = """### IoU Distribution
306
+
307
+ This histogram represents the distribution of <abbr title="{}">IoU scores</abbr> among all predictions. This gives you a sense of how well the model aligns bounding boxes. Ideally, the rightmost bars (from 0.9 to 1.0 IoU) should be much higher than others.
308
+ """
309
+
310
+
311
+ notification_avg_iou = {
312
+ "title": "Avg. IoU = {}",
313
+ "description": "",
314
+ }
315
+
316
+ markdown_calibration_score_1 = """## Calibration Score
317
+
318
+ This section analyzes <abbr title="{}">confidence scores</abbr> (or predicted probabilities) that the model generates for every predicted bounding box.
319
+ """
320
+
321
+ markdown_what_is_calibration = """In some applications, it's crucial for a model not only to make accurate predictions but also to provide reliable **confidence levels**. A well-calibrated model aligns its confidence scores with the actual likelihood of predictions being correct. For example, if a model claims 90% confidence for predictions but they are correct only half the time, it is **overconfident**. Conversely, **underconfidence** occurs when a model assigns lower confidence scores than the actual likelihood of its predictions. In the context of autonomous driving, this might cause a vehicle to brake or slow down too frequently, reducing travel efficiency and potentially causing traffic issues."""
322
+ markdown_calibration_score_2 = """To evaluate the calibration, we draw a <b>Reliability Diagram</b> and calculate <b>Expected Calibration Error</b> (ECE)."""
323
+
324
+ markdown_reliability_diagram = """## Reliability Diagram
325
+
326
+ Reliability diagram, also known as a Calibration curve, helps in understanding whether the confidence scores of detections accurately represent the true probability of a correct detection. A well-calibrated model means that when it predicts a detection with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
327
+ """
328
+
329
+ markdown_calibration_curve_interpretation = """
330
+ 1. **The curve is above the Ideal Line (Underconfidence):** If the calibration curve is consistently above the ideal line, this indicates underconfidence. The model's predictions are more correct than the confidence scores suggest. For example, if the model predicts a detection with 70% confidence but, empirically, 90% of such detections are correct, the model is underconfident.
331
+ 2. **The curve is below the Ideal Line (Overconfidence):** If the calibration curve is below the ideal line, the model exhibits overconfidence. This means it is too sure of its predictions. For instance, if the model predicts with 80% confidence but only 60% of these predictions are correct, it is overconfident.
332
+
333
+ To quantify the calibration, we calculate the **Expected Calibration Error (ECE)**. Intuitively, ECE can be viewed as the deviation of the model's calibration curve from the diagonal line that corresponds to a perfectly calibrated model. When ECE is high, we cannot fully trust the predicted probabilities.
334
+
335
+ **Note:** ECE is a measure of **error**. The lower the ECE, the better the calibration. A perfectly calibrated model has an ECE of 0.
336
+ """
337
+
338
+ notification_ece = {
339
+ "title": "Expected Calibration Error (ECE) = {}",
340
+ "description": "",
341
+ }
342
+
343
+
344
+ markdown_confidence_score_1 = """## Confidence Score Profile
345
+
346
+ This section goes deeper into analyzing confidence scores. It gives you an intuition about how these scores are distributed and helps to find the best <abbr title="{}">confidence threshold</abbr> suitable for your task or application.
347
+ """
348
+
349
+ markdown_confidence_score_2 = """This chart provides a comprehensive view about predicted confidence scores. It is used to determine an **optimal confidence threshold** based on your requirements.
350
+
351
+ The plot shows you what the metrics will be if you choose a specific confidence threshold. For example, if you set the threshold to 0.32, you can see on the plot what the precision, recall and f1-score will be for this threshold.
352
+ """
353
+
354
+ markdown_plot_confidence_profile = """
355
+ First, we sort all predictions by confidence scores from highest to lowest. As we iterate over each prediction we calculate the cumulative precision, recall and f1-score so far. Each prediction is plotted as a point on a graph, with a confidence score on the x-axis and one of three metrics on the y-axis (precision, recall, f1-score).
356
+ """
357
+
358
+ markdown_calibration_score_3 = """**How to find an optimal threshold:** you can find the maximum of the f1-score line on the plot, and the confidence score (X-axis) under this maximum corresponds to F1-optimal confidence threshold. This threshold ensures the balance between precision and recall. You can select a threshold according to your desired trade-offs."""
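The F1-optimal threshold described here is simply the confidence value at which the cumulative F1 curve peaks. A sketch, assuming the same (confidence, is_true_positive) inputs as in the PR-curve example above and a known number of ground-truth objects:

import numpy as np

def f1_optimal_threshold(confidences, is_tp, n_gt):
    order = np.argsort(-np.asarray(confidences, dtype=float))
    conf = np.asarray(confidences, dtype=float)[order]
    tp = np.asarray(is_tp, dtype=float)[order]
    cum_tp = np.cumsum(tp)
    cum_fp = np.cumsum(1 - tp)
    precision = cum_tp / (cum_tp + cum_fp)
    recall = cum_tp / n_gt
    f1 = 2 * precision * recall / np.clip(precision + recall, 1e-9, None)
    best = int(np.argmax(f1))
    # The confidence of the prediction at the F1 maximum is the F1-optimal threshold.
    return conf[best], f1[best]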
359
+
360
+ notification_f1 = {
361
+ "title": "F1-optimal confidence threshold = {}",
362
+ "description": "",
363
+ }
364
+
365
+ markdown_f1_at_ious = """### Confidence Profile at Different IoU thresholds
366
+
367
+ This chart breaks down the Confidence Profile into multiple curves, each for one <abbr title="{}">IoU threshold</abbr>. In this way you can understand how the f1-optimal confidence threshold changes with various IoU thresholds. Higher IoU thresholds mean that the model should align bounding boxes more accurately. This chart helps to find the optimal confidence threshold for different levels of localization accuracy.
368
+ """
369
+ markdown_confidence_distribution = """### Confidence Distribution
370
+
371
+ This graph helps to assess whether high confidence scores correlate with correct predictions (<abbr title="{}">true positives</abbr>) and the low confidence scores correlate with incorrect ones (<abbr title="{}">false positives</abbr>). It consists of two histograms, one for true positive predictions filled with green, and one for false positives filled with red.
372
+
373
+ Additionally, it provides a view of how predicted probabilities are distributed. Does the model skew probabilities toward lower or higher values, leading to an imbalance?
374
+
375
+ Ideally, the green histogram (TP predictions) should have higher confidence scores and be shifted to the right, indicating that the model is sure about its correct predictions, and the red histogram (FP predictions) should have lower confidence scores and be shifted to the left.
376
+ """
377
+
378
+ markdown_class_ap_polar = (
379
+ """## Average Precision by Class
380
+
381
+ A quick visual comparison of the model performance across all classes. Each axis in the chart represents a different class, and the distance to the center indicates the <abbr title="{}">Average Precision</abbr> (AP) for that class.
382
+
383
+ """
384
+ + clickable_label
385
+ )
386
+
387
+ markdown_class_ap_bar = (
388
+ """## Average Precision by Class
389
+
390
+ A quick visual comparison of the model performance across all classes. Each bar in the chart represents a different class, and the height of the bar indicates the <abbr title="{}">Average Precision</abbr> (AP) for that class.
391
+
392
+ """
393
+ + clickable_label
394
+ )
395
+
396
+
397
+ markdown_class_outcome_counts_1 = """### Outcome Counts by Class
398
+
399
+ This chart breaks down all predictions into <abbr title="{}">True Positives</abbr> (TP), <abbr title="{}">False Positives</abbr> (FP), and <abbr title="{}">False Negatives</abbr> (FN) by classes. This helps to visually assess the type of errors the model often encounters for each class.
400
+
401
+ """
402
+
403
+ markdown_normalization = """Normalization is used for better interclass comparison. If normalization is on, the total outcome counts are divided by the number of ground truth instances of the corresponding class. This is useful because, on the chart, the sum of the TP and FN bars will always result in 1.0, representing the full set of ground truth instances in the dataset for a class. This provides a clear visual understanding of how many instances the model correctly detected, how many it missed, and how many were false positives. For example, if a green bar (TP outcomes) reaches 1.0, this means the model has managed to predict all objects for the class without false negatives. Everything that is higher than 1.0 corresponds to False Positives, i.e., redundant predictions that the model should not have made. You can turn off the normalization, switching to absolute counts.
404
+
405
+ If normalization is off, the chart will display the total count of instances for each outcome type (TP, FP, or FN). This mode is identical to the main Outcome Counts graph at the top of the page. However, when normalization is off, you may encounter a class imbalance problem. Visually, bars that correspond to classes with many instances in the dataset will be much larger than others, which complicates the visual analysis.
406
+ """
407
+
408
+ markdown_class_outcome_counts_2 = (
409
+ """You can switch the plot view between normalized and absolute values.
410
+
411
+ _Bars in the chart are sorted by <abbr title="{}">F1-score</abbr> to keep a unified order of classes between different charts._
412
+
413
+ """
414
+ + clickable_label
415
+ )
416
+
417
+ empty = """### {}
418
+
419
+ > {}
420
+ """
421
+
422
+ markdown_speedtest_intro = """## Inference Speed
423
+
424
+ This is a speed test benchmark for the compared models. The models were tested with the following configurations:
425
+ """
426
+
427
+ markdown_speedtest_overview_ms = """### Latency (Inference Time)
428
+ The table below shows the speed test results. For each test, the time taken to process one batch of images is shown. Results are averaged across **{}** iterations.
429
+ """
430
+
431
+ markdown_speedtest_overview_fps = """### Frames per Second (FPS)
432
+ The table below shows the speed test results. For each test, the number of frames processed per second is shown. Results are averaged across **{}** iterations.
433
+ """
434
+
435
+ markdown_batch_inference = """
436
+ This chart shows how the model's speed changes with different batch sizes. As the batch size increases, you can observe an increase in FPS (images per second).
437
+ """
@@ -0,0 +1,27 @@
1
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.avg_precision_by_class import (
2
+ AveragePrecisionByClass,
3
+ )
4
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.calibration_score import (
5
+ CalibrationScore,
6
+ )
7
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.explore_predicttions import (
8
+ ExplorePredictions,
9
+ )
10
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.localization_accuracy import (
11
+ LocalizationAccuracyIoU,
12
+ )
13
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.outcome_counts import (
14
+ OutcomeCounts,
15
+ )
16
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.overview import (
17
+ Overview,
18
+ )
19
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.pr_curve import (
20
+ PrCurve,
21
+ )
22
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.precision_recal_f1 import (
23
+ PrecisionRecallF1,
24
+ )
25
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.speedtest import (
26
+ Speedtest,
27
+ )
@@ -0,0 +1,125 @@
1
+ from supervisely.nn.benchmark.comparison.detection_visualization.vis_metrics.vis_metric import (
2
+ BaseVisMetric,
3
+ )
4
+ from supervisely.nn.benchmark.visualization.widgets import ChartWidget, MarkdownWidget
5
+
6
+
7
+ class AveragePrecisionByClass(BaseVisMetric):
8
+ MARKDOWN_CLASS_AP = "markdown_class_ap_polar"
9
+ MARKDOWN_CLASS_AP_BAR = "markdown_class_ap_bar"
10
+
11
+ def get_figure(self):
12
+ import plotly.graph_objects as go # pylint: disable=import-error
13
+
14
+ fig = go.Figure()
15
+ labels = dict(r="Average Precision", theta="Class")
16
+ cls_cnt = len(self.eval_results[0].mp.cat_names)
17
+ for i, eval_result in enumerate(self.eval_results, 1):
18
+ # AP per-class
19
+ ap_per_class = eval_result.mp.coco_precision[:, :, :, 0, 2].mean(axis=(0, 1))
20
+ ap_per_class[ap_per_class == -1] = 0 # -1 is a placeholder for no GT
21
+
22
+ trace_name = f"[{i}] {eval_result.name}"
23
+
24
+ if cls_cnt >= 5:
25
+ fig.add_trace(
26
+ go.Scatterpolar(
27
+ r=ap_per_class,
28
+ theta=eval_result.mp.cat_names,
29
+ name=trace_name,
30
+ marker=dict(color=eval_result.color),
31
+ hovertemplate=trace_name
32
+ + "<br>"
33
+ + labels["theta"]
34
+ + ": %{theta}<br>"
35
+ + labels["r"]
36
+ + ": %{r:.2f}<br>"
37
+ + "<extra></extra>",
38
+ )
39
+ )
40
+ else:
41
+ fig.add_trace(
42
+ go.Bar(
43
+ x=eval_result.mp.cat_names,
44
+ y=ap_per_class,
45
+ name=trace_name,
46
+ width=0.2,
47
+ marker=dict(color=eval_result.color),
48
+ )
49
+ )
50
+
51
+ if cls_cnt >= 5:
52
+ fig.update_layout(
53
+ width=800,
54
+ height=800,
55
+ margin=dict(l=80, r=80, t=0, b=0),
56
+ modebar_add=["resetScale"],
57
+ showlegend=True,
58
+ polar=dict(radialaxis_range=[0, 1]),
59
+ )
60
+ else:
61
+ fig.update_layout(
62
+ xaxis_title="Class",
63
+ yaxis_title="Average Precision",
64
+ yaxis=dict(range=[0, 1.1]),
65
+ barmode="group",
66
+ width=700,
67
+ )
68
+
69
+ return fig
70
+
71
+ @property
72
+ def markdown_widget(self) -> MarkdownWidget:
73
+ template_name = self.MARKDOWN_CLASS_AP
74
+ if len(self.eval_results[0].mp.cat_names) < 5:
75
+ template_name = self.MARKDOWN_CLASS_AP_BAR
76
+ text: str = getattr(self.vis_texts, template_name).format(
77
+ self.vis_texts.definitions.average_precision
78
+ )
79
+ return MarkdownWidget(
80
+ name=self.MARKDOWN_CLASS_AP, title="Average Precision by Class", text=text
81
+ )
82
+
83
+ @property
84
+ def chart_widget(self) -> ChartWidget:
85
+ chart = ChartWidget(name="chart_class_ap", figure=self.get_figure())
86
+ chart.set_click_data(
87
+ gallery_id=self.explore_modal_table.id,
88
+ click_data=self.get_click_data(),
89
+ chart_click_extra="'getKey': (payload) => `${payload.points[0].curveNumber}${'_'}${payload.points[0].theta}`,",
90
+ )
91
+ return chart
92
+
93
+ def get_click_data(self):
94
+ res = {}
95
+ res["layoutTemplate"] = [None, None, None]
96
+ res["clickData"] = {}
97
+
98
+ for i, eval_result in enumerate(self.eval_results):
99
+ model_name = f"Model {i}"
100
+ for cat_name, v in eval_result.click_data.objects_by_class.items():
101
+ key = f"{i}_{cat_name}"
102
+ ap_per_class_dict = res["clickData"].setdefault(key, {})
103
+
104
+ img_ids = set()
105
+ obj_ids = set()
106
+
107
+ title = f"{model_name}, class: {len(v)} object{'s' if len(v) > 1 else ''}"
108
+ ap_per_class_dict["title"] = title
109
+
110
+ for x in v:
111
+ img_ids.add(x["dt_img_id"])
112
+ obj_ids.add(x["dt_obj_id"])
113
+
114
+ ap_per_class_dict["imagesIds"] = list(img_ids)
115
+ ap_per_class_dict["filters"] = [
116
+ {
117
+ "type": "tag",
118
+ "tagId": "confidence",
119
+ "value": [eval_result.f1_optimal_conf, 1],
120
+ },
121
+ {"type": "tag", "tagId": "outcome", "value": "TP"},
122
+ {"type": "specific_objects", "tagId": None, "value": list(obj_ids)},
123
+ ]
124
+
125
+ return res
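For context on the `coco_precision[:, :, :, 0, 2].mean(axis=(0, 1))` indexing used in `get_figure`, a sketch assuming the standard pycocotools layout of the precision array, `[iou_thresholds, recall_thresholds, classes, area_ranges, max_dets]`; the layout is an assumption about the upstream data, not something defined in this diff:

import numpy as np

def ap_per_class(coco_precision: np.ndarray) -> np.ndarray:
    # Select area range "all" (index 0) and the largest max-detections setting
    # (index 2, typically maxDets=100), leaving [iou_thresholds, recall_thresholds, classes].
    p = coco_precision[:, :, :, 0, 2].copy()
    p[p == -1] = 0  # -1 marks classes with no ground truth
    # Average over IoU and recall thresholds -> one AP value per class.
    return p.mean(axis=(0, 1))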