supervisely-6.73.294-py3-none-any.whl → supervisely-6.73.296-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of supervisely might be problematic.

Files changed (39):
  1. supervisely/cli/release/run.py +34 -51
  2. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py +1 -1
  3. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py +10 -0
  4. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predictions.py +2 -2
  5. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py +1 -1
  6. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +14 -8
  7. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py +1 -1
  8. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py +2 -2
  9. supervisely/nn/benchmark/instance_segmentation/evaluation_params.yaml +6 -1
  10. supervisely/nn/benchmark/instance_segmentation/text_templates.py +4 -4
  11. supervisely/nn/benchmark/object_detection/base_vis_metric.py +1 -1
  12. supervisely/nn/benchmark/object_detection/evaluation_params.yaml +6 -1
  13. supervisely/nn/benchmark/object_detection/evaluator.py +1 -3
  14. supervisely/nn/benchmark/object_detection/metric_provider.py +59 -46
  15. supervisely/nn/benchmark/object_detection/text_templates.py +4 -4
  16. supervisely/nn/benchmark/object_detection/vis_metrics/confidence_distribution.py +20 -2
  17. supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +16 -0
  18. supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py +10 -5
  19. supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +1 -0
  20. supervisely/nn/benchmark/object_detection/vis_metrics/model_predictions.py +1 -1
  21. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py +2 -57
  22. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +1 -1
  23. supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +11 -3
  24. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +1 -1
  25. supervisely/nn/benchmark/object_detection/vis_metrics/precision.py +18 -8
  26. supervisely/nn/benchmark/object_detection/vis_metrics/recall.py +13 -3
  27. supervisely/nn/benchmark/object_detection/visualizer.py +1 -1
  28. supervisely/nn/benchmark/utils/__init__.py +0 -1
  29. supervisely/nn/benchmark/utils/detection/__init__.py +1 -2
  30. supervisely/nn/benchmark/utils/detection/calculate_metrics.py +31 -37
  31. supervisely/nn/benchmark/visualization/evaluation_result.py +2 -4
  32. supervisely/nn/benchmark/visualization/vis_click_data.py +1 -3
  33. {supervisely-6.73.294.dist-info → supervisely-6.73.296.dist-info}/METADATA +1 -1
  34. {supervisely-6.73.294.dist-info → supervisely-6.73.296.dist-info}/RECORD +38 -39
  35. supervisely/nn/benchmark/utils/detection/metric_provider.py +0 -533
  36. {supervisely-6.73.294.dist-info → supervisely-6.73.296.dist-info}/LICENSE +0 -0
  37. {supervisely-6.73.294.dist-info → supervisely-6.73.296.dist-info}/WHEEL +0 -0
  38. {supervisely-6.73.294.dist-info → supervisely-6.73.296.dist-info}/entry_points.txt +0 -0
  39. {supervisely-6.73.294.dist-info → supervisely-6.73.296.dist-info}/top_level.txt +0 -0

supervisely/nn/benchmark/object_detection/vis_metrics/confidence_distribution.py

@@ -28,7 +28,8 @@ class ConfidenceDistribution(DetectionVisMetric):
     def _get_figure(self): # -> go.Figure:
         import plotly.graph_objects as go # pylint: disable=import-error

-        f1_optimal_conf, best_f1 = self.eval_result.mp.m_full.get_f1_optimal_conf()
+        f1_optimal_conf = self.eval_result.mp.f1_optimal_conf
+        custom_conf_threshold = self.eval_result.mp.custom_conf_threshold

         # Histogram of confidence scores (TP vs FP)
         scores_tp, scores_fp = self.eval_result.mp.m_full.scores_tp_and_fp()
@@ -88,7 +89,7 @@ class ConfidenceDistribution(DetectionVisMetric):
             x1=f1_optimal_conf,
             y0=0,
             y1=tp_y.max() * 1.3,
-            line=dict(color="orange", width=1, dash="dash"),
+            line=dict(color="orange", width=2, dash="dash"),
         )
         fig.add_annotation(
             x=f1_optimal_conf,
@@ -104,4 +105,21 @@ class ConfidenceDistribution(DetectionVisMetric):
         )
         fig.update_xaxes(title_text="Confidence Score", range=[0, 1])
         fig.update_yaxes(title_text="Count", range=[0, tp_y.max() * 1.3])
+
+        if custom_conf_threshold is not None:
+            # Custom threshold
+            fig.add_shape(
+                type="line",
+                x0=custom_conf_threshold,
+                x1=custom_conf_threshold,
+                y0=0,
+                y1=tp_y.max() * 1.3,
+                line=dict(color="orange", width=2, dash="dash"),
+            )
+            fig.add_annotation(
+                x=custom_conf_threshold,
+                y=tp_y.max() * 1.3,
+                text=f"Confidence threshold: {custom_conf_threshold:.2f}",
+                showarrow=False,
+            )
         return fig
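
Both the F1-optimal and the custom threshold are drawn with the same Plotly pattern: a vertical dashed shape plus a text annotation above it. A minimal, self-contained sketch of that pattern, independent of the Supervisely classes above (the scores and threshold values here are made up for illustration):

    import numpy as np
    import plotly.graph_objects as go

    # toy confidence scores, just to have something to plot
    scores = np.random.RandomState(0).beta(2, 2, size=500)
    fig = go.Figure(go.Histogram(x=scores, nbinsx=25, name="TP"))

    def add_threshold_marker(fig, conf, y_top, label):
        # vertical dashed line at the threshold plus a label above it
        fig.add_shape(
            type="line", x0=conf, x1=conf, y0=0, y1=y_top,
            line=dict(color="orange", width=2, dash="dash"),
        )
        fig.add_annotation(x=conf, y=y_top, text=f"{label}: {conf:.2f}", showarrow=False)

    add_threshold_marker(fig, 0.45, 60, "F1-optimal threshold")  # illustrative values
    add_threshold_marker(fig, 0.60, 60, "Confidence threshold")
    fig.update_xaxes(title_text="Confidence Score", range=[0, 1])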

supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py

@@ -101,6 +101,22 @@ class ConfidenceScore(DetectionVisMetric):
             text=f"F1-optimal threshold: {self.eval_result.mp.f1_optimal_conf:.2f}",
             showarrow=False,
         )
+        if self.eval_result.mp.custom_conf_threshold is not None:
+            # Add vertical line for the custom threshold
+            fig.add_shape(
+                type="line",
+                x0=self.eval_result.mp.custom_conf_threshold,
+                x1=self.eval_result.mp.custom_conf_threshold,
+                y0=0,
+                y1=self.eval_result.mp.custom_f1,
+                line=dict(color="black", width=2, dash="dash"),
+            )
+            fig.add_annotation(
+                x=self.eval_result.mp.custom_conf_threshold,
+                y=self.eval_result.mp.custom_f1 + 0.04,
+                text=f"Confidence threshold: {self.eval_result.mp.custom_conf_threshold:.2f}",
+                showarrow=False,
+            )
         fig.update_layout(
             dragmode=False,
             modebar=dict(

supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py

@@ -14,12 +14,17 @@ class ExplorePredictions(DetectionVisMetric):

     @property
     def md(self) -> MarkdownWidget:
-        text = self.vis_texts.markdown_explorer
+        conf_threshold_info = "Differences are calculated only for the optimal confidence threshold, allowing you to focus on the most accurate predictions made by the model."
+        if self.eval_result.mp.custom_conf_threshold is not None:
+            conf_threshold_info = (
+                "Differences are calculated for the custom confidence threshold (set manually)."
+            )
+        text = self.vis_texts.markdown_explorer.format(conf_threshold_info)
+
         return MarkdownWidget(self.MARKDOWN, "Explore Predictions", text)

     def gallery(self, opacity) -> GalleryWidget:
-        optimal_conf = self.eval_result.mp.f1_optimal_conf
-        default_filters = [{"confidence": [optimal_conf, 1]}]
+        default_filters = [{"confidence": [self.eval_result.mp.conf_threshold, 1]}]
         gallery = GalleryWidget(
             self.GALLERY, columns_number=3, filters=default_filters, opacity=opacity
         )
@@ -62,7 +67,7 @@ class ExplorePredictions(DetectionVisMetric):
             {
                 "type": "tag",
                 "tagId": "confidence",
-                "value": [self.eval_result.mp.f1_optimal_conf, 1],
+                "value": [self.eval_result.mp.conf_threshold, 1],
             }
         ]
         explore["title"] = "Explore all predictions"
@@ -89,7 +94,7 @@ class ExplorePredictions(DetectionVisMetric):
             {
                 "type": "tag",
                 "tagId": "confidence",
-                "value": [self.eval_result.mp.f1_optimal_conf, 1],
+                "value": [self.eval_result.mp.conf_threshold, 1],
             },
         ]
         for pairs_data in self.eval_result.matched_pair_data.values():
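
The same substitution — self.eval_result.mp.f1_optimal_conf replaced by self.eval_result.mp.conf_threshold in the gallery filters — repeats below in ModelPredictions, OutcomeCounts, PerClassOutcomeCounts and the visualizer. The diff does not show MetricProvider itself, but a reasonable reading is that conf_threshold resolves to the custom threshold when one is set and falls back to the F1-optimal value otherwise. A hypothetical sketch of that behavior (names and logic are an assumption, not taken from the package):

    from typing import Optional

    class MetricProviderSketch:
        # hypothetical stand-in for MetricProvider, only to illustrate the fallback
        def __init__(self, f1_optimal_conf: float, custom_conf_threshold: Optional[float] = None):
            self.f1_optimal_conf = f1_optimal_conf
            self.custom_conf_threshold = custom_conf_threshold

        @property
        def conf_threshold(self) -> float:
            # prefer the manually set threshold, otherwise use the F1-optimal one
            if self.custom_conf_threshold is not None:
                return self.custom_conf_threshold
            return self.f1_optimal_conf

    mp = MetricProviderSketch(f1_optimal_conf=0.42, custom_conf_threshold=0.5)
    default_filters = [{"confidence": [mp.conf_threshold, 1]}]  # -> [{"confidence": [0.5, 1]}]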

supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py

@@ -56,6 +56,7 @@ class KeyMetrics(DetectionVisMetric):
             width="60%",
             show_header_controls=False,
             main_column=columns[0],
+            page_size=15,
         )
         return table


supervisely/nn/benchmark/object_detection/vis_metrics/model_predictions.py

@@ -99,7 +99,7 @@ class ModelPredictions(DetectionVisMetric):
            {
                "type": "tag",
                "tagId": "confidence",
-               "value": [self.eval_result.mp.f1_optimal_conf, 1],
+               "value": [self.eval_result.mp.conf_threshold, 1],
            },
            # {"type": "tag", "tagId": "outcome", "value": "FP"},
        ]

supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py

@@ -120,69 +120,14 @@ class OutcomeCounts(DetectionVisMetric):
                "title"
            ] = f"{outcome}: {len(matches_data)} object{'s' if len(matches_data) > 1 else ''}"
            res["clickData"][outcome]["imagesIds"] = list(img_ids)
+
            res["clickData"][outcome]["filters"] = [
                {
                    "type": "tag",
                    "tagId": "confidence",
-                   "value": [self.eval_result.mp.f1_optimal_conf, 1],
+                   "value": [self.eval_result.mp.conf_threshold, 1],
                },
                {"type": "tag", "tagId": "outcome", "value": outcome},
            ]

        return res
-
-    # def get_diff_data(self) -> Dict:
-    #     res = {}
-
-    #     res["layoutTemplate"] = [
-    #         {"skipObjectTagsFiltering": True, "columnTitle": "Ground Truth"},
-    #         {"skipObjectTagsFiltering": ["outcome"], "columnTitle": "Prediction"},
-    #         {"skipObjectTagsFiltering": ["confidence"], "columnTitle": "Difference"},
-    #     ]
-
-    #     click_data = res.setdefault("clickData", {})
-    #     for outcome, matches_data in self.eval_result.click_data.outcome_counts.items():
-    #         filters = [
-    #             {
-    #                 "type": "tag",
-    #                 "tagId": "confidence",
-    #                 "value": [self.eval_result.mp.f1_optimal_conf, 1],
-    #             },
-    #             {"type": "tag", "tagId": "outcome", "value": outcome},
-    #         ]
-    #         for match_data in matches_data:
-    #             pairs_data = self.eval_result.matched_pair_data[match_data["gt_img_id"]]
-    #             gt = pairs_data.gt_image_info
-    #             pred = pairs_data.pred_image_info
-    #             diff = pairs_data.diff_image_info
-    #             assert gt.name == pred.name == diff.name
-    #             for img_id in [pred.id, diff.id]:
-    #                 key = click_data.setdefault(str(img_id), {})
-    #                 key["imagesIds"] = [gt.id, pred.id, diff.id]
-    #                 key["filters"] = filters
-    #                 key["title"] = f"Image: {gt.name}"
-
-    #             object_bindings = []
-    #             for img in [pred, diff]:
-    #                 if img == pred:
-    #                     ann_json = pairs_data.pred_annotation.to_json()
-    #                 else:
-    #                     ann_json = pairs_data.diff_annotation.to_json()
-    #                 for obj in ann_json["objects"]:
-    #                     for tag in obj["tags"]:
-    #                         if tag["name"] == "matched_gt_id":
-    #                             object_bindings.append(
-    #                                 [
-    #                                     {
-    #                                         "id": obj["id"],
-    #                                         "annotationKey": img.id,
-    #                                     },
-    #                                     {
-    #                                         "id": int(tag["value"]),
-    #                                         "annotationKey": gt.id if img == pred else pred.id,
-    #                                     },
-    #                                 ]
-    #                             )
-    #             key["objectsBindings"] = object_bindings
-
-    #     return res

supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py

@@ -183,7 +183,7 @@ class PerClassOutcomeCounts(DetectionVisMetric):
                {
                    "type": "tag",
                    "tagId": "confidence",
-                   "value": [self.eval_result.mp.f1_optimal_conf, 1],
+                   "value": [self.eval_result.mp.conf_threshold, 1],
                },
                {"type": "tag", "tagId": "outcome", "value": outcome},
            ]

supervisely/nn/benchmark/object_detection/vis_metrics/overview.py

@@ -37,6 +37,15 @@ class Overview(DetectionVisMetric):
         if self.eval_result.different_iou_thresholds_per_class:
             iou_threshold = "Different IoU thresholds for each class (see the table below)"

+        conf_text = (
+            f"- **Optimal confidence threshold**: "
+            f"{round(self.eval_result.mp.f1_optimal_conf, 4)} (calculated automatically), "
+            f"<a href='{opt_conf_url}' target='_blank'>learn more</a>."
+        )
+        custom_conf_thrs = self.eval_result.mp.custom_conf_threshold
+        if custom_conf_thrs is not None:
+            conf_text += f"\n- **Custom confidence threshold**: {custom_conf_thrs}"
+
         formats = [
             model_name.replace("_", "\_"),
             checkpoint_name.replace("_", "\_"),
@@ -51,8 +60,7 @@ class Overview(DetectionVisMetric):
             note_about_images,
             starter_app_info,
             iou_threshold,
-            round(self.eval_result.mp.f1_optimal_conf, 4),
-            opt_conf_url,
+            conf_text,
             self.eval_result.mp.average_across_iou_thresholds,
             average_url,
             self.vis_texts.docs_url,
@@ -119,7 +127,7 @@ class Overview(DetectionVisMetric):
         starter_app_info = train_session or evaluator_session or ""

         return classes_str, images_str, starter_app_info
-
+
     @property
     def iou_per_class_md(self) -> List[MarkdownWidget]:
         if not self.eval_result.different_iou_thresholds_per_class:

supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py

@@ -19,7 +19,7 @@ class PRCurve(DetectionVisMetric):

     @property
     def md(self) -> MarkdownWidget:
-        text = self.vis_texts.markdown_pr_curve.format(self.vis_texts.definitions.f1_score)
+        text = self.vis_texts.markdown_pr_curve.format(self.vis_texts.definitions.about_pr_tradeoffs)
         return MarkdownWidget(self.MARKDOWN, "Precision-Recall Curve", text)

     @property

supervisely/nn/benchmark/object_detection/vis_metrics/precision.py

@@ -26,11 +26,21 @@ class Precision(DetectionVisMetric):
     @property
     def notification(self) -> NotificationWidget:
         title, desc = self.vis_texts.notification_precision.values()
-        tp_plus_fp = self.eval_result.mp.TP_count + self.eval_result.mp.FP_count
+        mp = self.eval_result.mp
+        tp_plus_fp = mp.TP_count + mp.FP_count
+        precision = mp.base_metrics()["precision"].round(2)
+        if mp.average_across_iou_thresholds:
+            iou_text = "[0.5,0.55,...,0.95]"
+        else:
+            if mp.iou_threshold_per_class is not None:
+                iou_text = "custom"
+            else:
+                iou_text = mp.iou_threshold
+        title = f"Precision (IoU={iou_text}) = {precision}"
         return NotificationWidget(
             self.NOTIFICATION,
-            title.format(self.eval_result.mp.base_metrics()["precision"].round(2)),
-            desc.format(self.eval_result.mp.TP_count, tp_plus_fp),
+            title,
+            desc.format(mp.TP_count, tp_plus_fp),
         )

     @property
@@ -51,9 +61,9 @@ class Precision(DetectionVisMetric):
     def _get_figure(self): # -> go.Figure
         import plotly.express as px # pylint: disable=import-error

-        sorted_by_precision = self.eval_result.mp.per_class_metrics().sort_values(by="precision")
+        sorted_by_f1 = self.eval_result.mp.per_class_metrics().sort_values(by="f1")
         fig = px.bar(
-            sorted_by_precision,
+            sorted_by_f1,
             x="category",
             y="precision",
             # title="Per-class Precision (Sorted by F1)",
@@ -62,14 +72,14 @@ class Precision(DetectionVisMetric):
             color_continuous_scale="Plasma",
         )
         fig.update_traces(hovertemplate="Class: %{x}<br>Precision: %{y:.2f}<extra></extra>")
-        if len(sorted_by_precision) <= 20:
+        if len(sorted_by_f1) <= 20:
             fig.update_traces(
-                text=sorted_by_precision.round(2),
+                text=sorted_by_f1["precision"].round(2),
                 textposition="outside",
             )
         fig.update_xaxes(title_text="Class")
         fig.update_yaxes(title_text="Precision", range=[0, 1])
         fig.update_layout(
-            width=700 if len(sorted_by_precision) < 10 else None,
+            width=700 if len(sorted_by_f1) < 10 else None,
         )
         return fig

supervisely/nn/benchmark/object_detection/vis_metrics/recall.py

@@ -26,11 +26,21 @@ class Recall(DetectionVisMetric):
     @property
     def notification(self) -> NotificationWidget:
         title, desc = self.vis_texts.notification_recall.values()
-        tp_plus_fn = self.eval_result.mp.TP_count + self.eval_result.mp.FN_count
+        mp = self.eval_result.mp
+        tp_plus_fn = mp.TP_count + mp.FN_count
+        recall = mp.base_metrics()["recall"].round(2)
+        if mp.average_across_iou_thresholds:
+            iou_text = "[0.5,0.55,...,0.95]"
+        else:
+            if mp.iou_threshold_per_class is not None:
+                iou_text = "custom"
+            else:
+                iou_text = mp.iou_threshold
+        title = f"Recall (IoU={iou_text}) = {recall}"
         return NotificationWidget(
             self.NOTIFICATION,
-            title.format(self.eval_result.mp.base_metrics()["recall"].round(2)),
-            desc.format(self.eval_result.mp.TP_count, tp_plus_fn),
+            title,
+            desc.format(mp.TP_count, tp_plus_fn),
         )

     @property
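
The notification title logic above duplicates the branching already added to Precision: average across IoU thresholds, a per-class (custom) threshold, or a single fixed threshold. A small hypothetical helper (not present in the package) could express the shared rule in one place:

    from typing import Optional

    def iou_label(average_across_iou_thresholds: bool,
                  iou_threshold_per_class: Optional[dict],
                  iou_threshold: float) -> str:
        # label used in the "Precision (IoU=...)" / "Recall (IoU=...)" titles
        if average_across_iou_thresholds:
            return "[0.5,0.55,...,0.95]"
        if iou_threshold_per_class is not None:
            return "custom"
        return str(iou_threshold)

    title = f"Recall (IoU={iou_label(False, None, 0.5)}) = 0.73"  # illustrative values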

supervisely/nn/benchmark/object_detection/visualizer.py

@@ -452,7 +452,7 @@ class ObjectDetectionVisualizer(BaseVisualizer):
                    conf = tag.value
                    break

-           if conf < self.eval_result.mp.f1_optimal_conf:
+           if conf < self.eval_result.mp.conf_threshold:
                continue # do not add labels with low confidence to diff project
            if match_tag_id:
                continue # do not add TP labels to diff project

supervisely/nn/benchmark/utils/__init__.py

@@ -1,6 +1,5 @@
 # isort: skip_file
 from supervisely.nn.benchmark.utils.detection.calculate_metrics import calculate_metrics
-from supervisely.nn.benchmark.utils.detection.metric_provider import MetricProvider
 from supervisely.nn.benchmark.utils.detection.sly2coco import sly2coco
 from supervisely.nn.benchmark.utils.detection.utlis import read_coco_datasets
 from supervisely.nn.benchmark.utils.detection.utlis import try_set_conf_auto

supervisely/nn/benchmark/utils/detection/__init__.py

@@ -1,2 +1 @@
-from supervisely.nn.benchmark.utils.detection.calculate_metrics import calculate_metrics
-from supervisely.nn.benchmark.utils.detection.metric_provider import MetricProvider
+from supervisely.nn.benchmark.utils.detection.calculate_metrics import calculate_metrics

supervisely/nn/benchmark/utils/detection/calculate_metrics.py

@@ -82,6 +82,11 @@ def calculate_metrics(
     iou_idx = np.where(np.isclose(iouThrs, iou_threshold))[0][0]
     iou_idx_per_class = {cat_id: iou_idx for cat_id in cocoGt.getCatIds()}

+    # TODO: Add support for average_across_iou_thresholds
+    if iou_threshold_per_class is not None or iou_threshold != 0.5:
+        average_across_iou_thresholds = False
+        evaluation_params["average_across_iou_thresholds"] = average_across_iou_thresholds
+
     eval_img_dict = get_eval_img_dict(cocoEval)
     eval_img_dict_cls = get_eval_img_dict(cocoEval_cls)
     matches = get_matches(
@@ -90,6 +95,7 @@ def calculate_metrics(
         cocoEval_cls,
         iou_idx_per_class=iou_idx_per_class,
     )
+    # true_positives, false_positives, false_negatives = get_counts(eval_img_dict, cocoEval_cls)

     params = {
         "iouThrs": cocoEval.params.iouThrs,
@@ -110,43 +116,31 @@ def calculate_metrics(
     return eval_data


-def get_counts(cocoEval):
-    """
-    true_positives, false_positives, false_negatives
-
-    type cocoEval: COCOeval
-    """
-    aRng = cocoEval.params.areaRng[0]
-    cat_ids = cocoEval.params.catIds
-    eval_imgs = [ev for ev in cocoEval.evalImgs if ev is not None and ev["aRng"] == aRng]
-
-    N = len(eval_imgs)
-    T = len(cocoEval.params.iouThrs)
-    K = max(cat_ids) + 1
-
-    true_positives = np.zeros((K, N, T))
-    false_positives = np.zeros((K, N, T))
-    false_negatives = np.zeros((K, N, T))
-
-    for i, eval_img in enumerate(eval_imgs):
-        catId = eval_img["category_id"]
-        dt_matches = eval_img["dtMatches"]
-        gt_matches = eval_img["gtMatches"]
-
-        # Ignore
-        if np.any(eval_img["gtIgnore"]):
-            dt_matches = eval_img["dtMatches"].copy()
-            dt_matches[eval_img["dtIgnore"]] = -1
-
-            gt_matches = eval_img["gtMatches"].copy()
-            gt_ignore_mask = eval_img["gtIgnore"][None,].repeat(T, axis=0).astype(bool)
-            gt_matches[gt_ignore_mask] = -1
-
-        true_positives[catId, i] = np.sum(dt_matches > 0, axis=1)
-        false_positives[catId, i] = np.sum(dt_matches == 0, axis=1)
-        false_negatives[catId, i] = np.sum(gt_matches == 0, axis=1)
-
-    return true_positives[cat_ids], false_positives[cat_ids], false_negatives[cat_ids]
+def get_counts(eval_img_dict: dict, cocoEval_cls):
+    cat_ids = cocoEval_cls.cocoGt.getCatIds()
+    iouThrs = cocoEval_cls.params.iouThrs
+    catId2idx = {cat_id: i for i, cat_id in enumerate(cat_ids)}
+    true_positives = np.zeros((len(cat_ids), len(iouThrs)))
+    false_positives = np.zeros((len(cat_ids), len(iouThrs)))
+    false_negatives = np.zeros((len(cat_ids), len(iouThrs)))
+    for img_id, eval_imgs in eval_img_dict.items():
+        for eval_img in eval_imgs:
+            cat_id = eval_img["category_id"]
+            cat_idx = catId2idx[cat_id]
+            gtIgnore = eval_img["gtIgnore"]
+            # if conf_thresh is not None:
+            #     scores = np.array(eval_img["dtScores"])
+            #     dt_conf_mask = scores < conf_thresh
+            #     dt_not_ignore[:, dt_conf_mask] = False
+            #     for idx, dt_id in enumerate(eval_img['dtIds']):
+            #         if dt_conf_mask[idx]:
+            #             eval_img["gtMatches"][eval_img["gtMatches"] == dt_id] = 0.
+            gt_not_ignore_idxs = np.where(np.logical_not(gtIgnore))[0]
+            dt_not_ignore = np.logical_not(eval_img["dtIgnore"])
+            true_positives[cat_idx] += ((eval_img["dtMatches"] > 0) & dt_not_ignore).sum(1)
+            false_positives[cat_idx] += ((eval_img["dtMatches"] == 0) & dt_not_ignore).sum(1)
+            false_negatives[cat_idx] += (eval_img["gtMatches"][:, gt_not_ignore_idxs] == 0).sum(1)
+    return true_positives.astype(int), false_positives.astype(int), false_negatives.astype(int)


 def get_counts_and_scores(cocoEval, cat_id: int, t: int):
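
In the rewritten get_counts, dtMatches and gtMatches come from pycocotools' per-image evaluation records and have shape [num_iou_thresholds, num_detections] and [num_iou_thresholds, num_gt_objects]; a positive entry means "matched", zero means "unmatched". A toy walk-through of the counting logic with fabricated arrays (two IoU thresholds, three detections, two ground-truth objects):

    import numpy as np

    # fabricated record mimicking one entry of COCOeval's evalImgs
    eval_img = {
        "dtMatches": np.array([[3, 0, 5], [3, 0, 0]]),  # matched GT id per detection, per IoU threshold
        "gtMatches": np.array([[7, 8], [7, 0]]),        # matched detection id per GT object, per IoU threshold
        "dtIgnore": np.zeros((2, 3), dtype=bool),
        "gtIgnore": np.array([False, False]),
    }

    dt_not_ignore = np.logical_not(eval_img["dtIgnore"])
    gt_keep = np.where(np.logical_not(eval_img["gtIgnore"]))[0]

    tp = ((eval_img["dtMatches"] > 0) & dt_not_ignore).sum(1)   # -> [2, 1] per IoU threshold
    fp = ((eval_img["dtMatches"] == 0) & dt_not_ignore).sum(1)  # -> [1, 2]
    fn = (eval_img["gtMatches"][:, gt_keep] == 0).sum(1)        # -> [0, 1]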

supervisely/nn/benchmark/visualization/evaluation_result.py

@@ -13,7 +13,7 @@ from supervisely.app.widgets import SlyTqdm
 from supervisely.io.env import team_id
 from supervisely.io.fs import dir_empty, mkdir
 from supervisely.io.json import load_json_file
-from supervisely.nn.benchmark.utils.detection.metric_provider import MetricProvider
+from supervisely.nn.benchmark.object_detection.metric_provider import MetricProvider
 from supervisely.nn.benchmark.visualization.vis_click_data import ClickData, IdMapper
 from supervisely.sly_logger import logger
 from supervisely.task.progress import tqdm_sly
@@ -218,9 +218,7 @@ class EvalResult:
         )

         self.mp = MetricProvider(
-            self.eval_data["matches"],
-            self.eval_data["coco_metrics"],
-            self.eval_data["params"],
+            self.eval_data,
             self.coco_gt,
             self.coco_dt,
         )

supervisely/nn/benchmark/visualization/vis_click_data.py

@@ -2,9 +2,7 @@ from __future__ import annotations

 from collections import defaultdict

-from supervisely.nn.benchmark.utils.detection.metric_provider import (
-    MetricProvider,
-)
+from supervisely.nn.benchmark.object_detection.metric_provider import MetricProvider


 class IdMapper:

{supervisely-6.73.294.dist-info → supervisely-6.73.296.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.294
+Version: 6.73.296
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely

{supervisely-6.73.294.dist-info → supervisely-6.73.296.dist-info}/RECORD

@@ -550,7 +550,7 @@ supervisely/cli/project/project_get.py,sha256=RWnMxuKd_WWhUlA5QEqI_9d4N6x8VDq0ta
 supervisely/cli/project/project_upload.py,sha256=qA_0ktOpJxUUa_Hliwvny8-uIYDQBiuzHMq8nufbLd4,1416
 supervisely/cli/release/__init__.py,sha256=5aDijgIIDsKFZVayq8anvv5ynWKhC4LZqAdESTKCW2c,335
 supervisely/cli/release/release.py,sha256=OtAi9g_2kvjp5lx4aiTE0JxczzJcaBtDFpHR-rHWFpE,9308
-supervisely/cli/release/run.py,sha256=z6jDTVweCnlaIWsdUIN282B_JUqYZefJMIPLZnq2g9U,19899
+supervisely/cli/release/run.py,sha256=nuFh9mVFcrUlB4PWagMEC6iUIa7nA2pMiM_CC1IZ42M,19814
 supervisely/cli/task/__init__.py,sha256=n0ofJDqX3AMvvTz1umfBDfEUPDFzk5Htve3nnZFd7fs,67
 supervisely/cli/task/task_set.py,sha256=KIGJ-X0iB7DzX3Ig8720FJh1WpohTVkkPk8HZt2rIzM,1337
 supervisely/cli/teamfiles/__init__.py,sha256=1V9ZFo8-xVOjFFJr7siJx2VAv-F0m44cxMEm5ZEU0Ww,251
@@ -755,14 +755,14 @@ supervisely/nn/benchmark/comparison/detection_visualization/__init__.py,sha256=4
 supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py,sha256=JQ2DqGsvUBbjVmEsq9pGc41U8WQrtaX_Gin4IBguIow,28735
 supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py,sha256=1PZU9P7VP4lPZDuv9dg_f3il03SQ_zRr5G_L6Q72gbQ,11421
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/__init__.py,sha256=2cqjAOwahJoptYhbFKEWws8gRW3K3bxlA9KnQleCrsA,1125
-supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py,sha256=f4FMY-XCiNs0VA-e0aOFVNImjTQSnF4wTLNRSlj2oYA,4636
-supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py,sha256=IKg2rofo21xGxQIoQyNlZ-UqOpAVviSn_A9SDkokC0w,7592
-supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predictions.py,sha256=axaviTZ4dLVWIc2R-o0Kv8g8Zr1SQVfyeokoLEA9Eqw,6484
+supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py,sha256=cf4jxgrau1t8SHiMisnHJ7aenVRPSLUgz1Q4Gic_jTk,4635
+supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py,sha256=BAYWwLASuq-OBQkjFRH6GIT3nUBTZY0RJNipP2CD9To,8075
+supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predictions.py,sha256=JGyvYZ4ZpbgZE2zQ6yUY6flE4n2STl0OMwmwpDF4AeU,6482
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py,sha256=OYmnloods1UYQ8SIPAcyOK33w0iYSn637OeMKNTrgbA,5342
-supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py,sha256=vev6EOc7-01i-5VAyQQwm9FGOMFYcWfJ2Y6ufUWg-DQ,13143
-supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py,sha256=drRm_Hn8Da4Oc_dsa4ol_mZSeMRNYT1-Zkmb0LbiEAA,10193
-supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py,sha256=66hs426dR1_TUps9K-UhBp5_xBiFjIouKcF_5gP-Hn8,4797
-supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py,sha256=602GFNpKZjeRhxUTdlcE6ZczcFEGEjp0qLoTbkM54M4,11558
+supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py,sha256=01XWPppmkeovJ-9AzMNOpiqomLGdZGMu2FmQ2BIfoE8,13142
+supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py,sha256=p4L1lFqAlz3D6R3mMUvvtSiFdZZGT_aEzIEpBs5hsv0,10501
+supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py,sha256=qJtswtxnuLSGWPVq599AXindPNDoem8cR1-0mgI1POc,4807
+supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py,sha256=is75jOQhykejNmj4Pea875s2MwBL0wZCxPhI-YostY4,11556
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py,sha256=sQDkzfpVNaSYBHVcHYqydRSWN0i-yV9uhtEAggg295A,10879
 supervisely/nn/benchmark/comparison/semantic_segmentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/comparison/semantic_segmentation/text_templates.py,sha256=iHybWNpH9-LTjx8-iuyNawVoBXwXzho66a1K_afY_9Q,7622
@@ -777,36 +777,36 @@ supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/renormaliz
 supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/speedtest.py,sha256=sQDkzfpVNaSYBHVcHYqydRSWN0i-yV9uhtEAggg295A,10879
 supervisely/nn/benchmark/instance_segmentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/instance_segmentation/benchmark.py,sha256=lTDzgKGpfeF5o_a2nS56wiAsUQPH1eubk37b9CaB2KI,1171
-supervisely/nn/benchmark/instance_segmentation/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
+supervisely/nn/benchmark/instance_segmentation/evaluation_params.yaml,sha256=NoaecTcEp-LhsDQcHNQZi1gzNXcahgycKy_C4aDcSSw,304
 supervisely/nn/benchmark/instance_segmentation/evaluator.py,sha256=mpCi8S6YNwlVvgcERQSHBOhC9PrSfQkQ55pPTcK6V9c,2811
-supervisely/nn/benchmark/instance_segmentation/text_templates.py,sha256=l2I1PbenuBzcCPu_h2J5JE5gQsJCr6lNnoIzk5BEuwc,25868
+supervisely/nn/benchmark/instance_segmentation/text_templates.py,sha256=usKqm_FaO-WXiopxzrdjpIrOqHdqFQ89lmYoayzt6KM,25597
 supervisely/nn/benchmark/instance_segmentation/visualizer.py,sha256=8NscOKy7JK4AG-Czu3SM0qJQXLDfKD9URdG1d4nz89E,564
 supervisely/nn/benchmark/object_detection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/nn/benchmark/object_detection/base_vis_metric.py,sha256=XjUnFCnCMYLrpjojIOwiRNaSsSLYpozTHWfwLkaCd5U,1612
+supervisely/nn/benchmark/object_detection/base_vis_metric.py,sha256=44Em214YPxZgn2hEzFvqBcnjsyiElD9TSuLamwUnx20,1611
 supervisely/nn/benchmark/object_detection/benchmark.py,sha256=Wb4xlFXilIMVfsifNNQY25uE52NeEDLzQpnq8QPYq9U,1086
-supervisely/nn/benchmark/object_detection/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
-supervisely/nn/benchmark/object_detection/evaluator.py,sha256=EOQQbmwQqjjvbRu3tY24SRA7K8nyqshR92gUcP1lcrY,7371
-supervisely/nn/benchmark/object_detection/metric_provider.py,sha256=iV79hlyB-_wj-X25-JPjoXfwQWNxGrMVweha3JZA46M,22557
-supervisely/nn/benchmark/object_detection/text_templates.py,sha256=ZjkcP91dj98_1xqxKSy5TGrU08puXaLhpjNC-c_41A0,26113
-supervisely/nn/benchmark/object_detection/visualizer.py,sha256=NpLKVW5fo6N0kYzgLsfY66wvCv38G3k-SNm4HImXt6g,32366
+supervisely/nn/benchmark/object_detection/evaluation_params.yaml,sha256=NoaecTcEp-LhsDQcHNQZi1gzNXcahgycKy_C4aDcSSw,304
+supervisely/nn/benchmark/object_detection/evaluator.py,sha256=s-hPBm5BmoCgwoozVyDacum4kVLNtYK6I6NCt_L_LSA,7278
+supervisely/nn/benchmark/object_detection/metric_provider.py,sha256=59UnOX7VuYvVQFeUJy5v6EFIpqSDNgx5wMp9qyVixgM,23686
+supervisely/nn/benchmark/object_detection/text_templates.py,sha256=4BgTIX1Co4WK9_VSUa1qWCmh5OJzo3_opVU6LOjKSjc,25842
+supervisely/nn/benchmark/object_detection/visualizer.py,sha256=0eJ-ATDVMyjMRInoEK604vL3vJPN5Ge1HjarS11CS0A,32365
 supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py,sha256=AXCLHEySEdR-B-5sfDoWBmmOLBVlyW2U_xr8Ta42sQI,2096
-supervisely/nn/benchmark/object_detection/vis_metrics/confidence_distribution.py,sha256=OlwkPgzEQ-RegcLZHVUVOL0n6I_2iayPVpAIie4y2O8,3615
-supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py,sha256=kuOhQDNwAsBbtjuMU_7GajVzu6j6n3xDJTv_hNKX6o8,4007
+supervisely/nn/benchmark/object_detection/vis_metrics/confidence_distribution.py,sha256=J6l9Vc8TGsvkTyH5u7Ry4P2jgJC2GYmcgOeMALftZBw,4254
+supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py,sha256=pmxnF_UJk0WhqEdL7O_yIjIBtxPipLQZVJwCr6XB3zc,4751
 supervisely/nn/benchmark/object_detection/vis_metrics/confusion_matrix.py,sha256=2PJUt0-njRpzN7XBGjkSt9kkh5tDPuv_Sne-2v8DWHc,3731
-supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py,sha256=wIYfq3izM2XNJHr56h3j5XhuU8W8Y3wO_RKAwxntQs4,4855
+supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py,sha256=sMSFntieAdmL8siTKwppB4RxIhfZlsg1loUeOEhiKOg,5234
 supervisely/nn/benchmark/object_detection/vis_metrics/f1_score_at_different_iou.py,sha256=Aewzu2QhxZoPT_k0QJt_G11B8M2DXLCGRjE0MlVYNko,2892
 supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py,sha256=7rObk7WNsfwK7xBWl3aOxcn0uD48njEc04fQIPHc3_4,4678
 supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py,sha256=lv4Bk8W4X8ZhvQKyMXI46d240PNlMFx1hdji_aoTS50,3601
-supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py,sha256=byucJuHYWSXIZU8U1Dc44QDpG3lTlhoNdUfD1b-uriw,4721
-supervisely/nn/benchmark/object_detection/vis_metrics/model_predictions.py,sha256=gsGDsesiwOcqeFvHr33b4PSJNw6MoA5brO-qRydRtsA,5944
-supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py,sha256=HuTgisYmXCSUeF5WOahy-uaCdvRLsNzg28BDrZ-5hww,7161
-supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py,sha256=BKsb1XGVsg6-aOI5G6NibxvdD5lVzkMjHisI8T85Sns,6853
-supervisely/nn/benchmark/object_detection/vis_metrics/overview.py,sha256=uBxHIz8t0ujS5MI-LASg1RrPx3YqeKKc7dWz3MozHIU,6546
-supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py,sha256=EeZmyNlTVQLQ-0wIDGdvFmRkahJBBiOKSmWiAJ8Bfks,3478
+supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py,sha256=cbGwhG95rp5NmOy0qHvpSiAjWqtQQMWCfKs7FzbuKzc,4747
+supervisely/nn/benchmark/object_detection/vis_metrics/model_predictions.py,sha256=7ynfCcZMFdwxFf2seUiFc_lc5XzkExIdxI5wr_64mmQ,5943
+supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py,sha256=yPC-6ionooo0eUEiKSOFb-LkhqOuWUXwWvQt6SBQ4Ws,4414
+supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py,sha256=FDba0gWJWnf7mGtl79TONC2fihc39sc-tXCVLYCt84Y,6852
+supervisely/nn/benchmark/object_detection/vis_metrics/overview.py,sha256=-22iCkQkpSQP5SZclyM0H9jSs-EynEbBgnhJKayG634,6923
+supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py,sha256=64pWJD0306D2NYupxLD1irhpGgAOCgoTpZiSVBBdqDU,3488
 supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py,sha256=Bl_buVvH8SVqwsc4DcHnojMOqpwTnRgXFt9yw_Y1BR0,1607
-supervisely/nn/benchmark/object_detection/vis_metrics/precision.py,sha256=cAgMrp13uulHfM8xnPDZyR6PqS8nck1Fo7YPpvHPCbw,2708
+supervisely/nn/benchmark/object_detection/vis_metrics/precision.py,sha256=lrxCH82blwMVAxofLDmqPPgs7V1S9AFgmfxtHAoWEdI,2973
 supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py,sha256=IetoRTE9xZHHMZlPwUmQyTnaD7cQpnIQ3zJmUUr6fgY,1965
-supervisely/nn/benchmark/object_detection/vis_metrics/recall.py,sha256=AAxg3eJDjWIZEarOMZNcdIcYpVDFFDlbc5bwF4X3GIo,2579
+supervisely/nn/benchmark/object_detection/vis_metrics/recall.py,sha256=sgvxqfXrT5fPtV8_t1Sr11Ln66MPYNlQlPVSmXjqnYU,2864
 supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py,sha256=u-00HH54XzlhFuzc7U-mk2-IhUSEPYzv23BIIz8STvk,1984
 supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py,sha256=_8ie3dPfwOyNCg-YhqO5jFW6kubCfQC2Obn9QSZFVeQ,3615
 supervisely/nn/benchmark/object_detection/vis_metrics/speedtest.py,sha256=uAYu42WUcQCBdfm-PZxOPAGLDsPlcnDJL7PCXDRStbY,5449
@@ -831,10 +831,9 @@ supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py,sha256=RT
 supervisely/nn/benchmark/semantic_segmentation/vis_metrics/renormalized_error_ou.py,sha256=w4oqirf_o7uz0fwaapaFR0ByjCBSEfMv--ZgEZTkuFQ,1575
 supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py,sha256=0UP_HReIciHQyU6sOXnWhAzyVBoGznV7ZHF8m4vEVX0,5941
 supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py,sha256=rRdYZxmhQX4T3RsXJVGp34NMZPz8jUHtVvBN5BpPJ5I,603
-supervisely/nn/benchmark/utils/__init__.py,sha256=JHT73gWdwgLJKTiCpDdwggm1t_EWB0JCC90-zD7reXM,741
-supervisely/nn/benchmark/utils/detection/__init__.py,sha256=L3QKGuKUlR2N_QFRTRsa6gfLDbksIaFMYO0Hukxxy1U,172
-supervisely/nn/benchmark/utils/detection/calculate_metrics.py,sha256=LXZET5yLp9S7Uq2eX4HpAMnBMxTI5Q2CgKSc1mCfaRM,11388
-supervisely/nn/benchmark/utils/detection/metric_provider.py,sha256=cgF6uzF7XOvU2CpxyU7zuK1HH6hhNiIV3vQc8MAzwMU,19934
+supervisely/nn/benchmark/utils/__init__.py,sha256=r0Ay4OMqfIL-9wwJykKji_Uks2Dm9vUhyA7hT8eLxII,657
+supervisely/nn/benchmark/utils/detection/__init__.py,sha256=6CsMxQqUp1GOc-2Wmnw2lamtvklHo2tcCYTxgT5NsZo,88
+supervisely/nn/benchmark/utils/detection/calculate_metrics.py,sha256=gC6by_2HT9ACuxbtW93eKeioW9sCMMDM3aPi99w1xx8,11963
 supervisely/nn/benchmark/utils/detection/metrics.py,sha256=oyictdJ7rRDUkaVvHoxntywW5zZweS8pIJ1bN6JgXtE,2420
 supervisely/nn/benchmark/utils/detection/sly2coco.py,sha256=0O2LSCU5zIX34mD4hZIv8O3-j6LwnB0DqhiVPAiosO8,6883
 supervisely/nn/benchmark/utils/detection/utlis.py,sha256=dKhsOGmQKH20-IlD90DWfZzi171j65N71hNdHRCX5Hs,954
@@ -844,10 +843,10 @@ supervisely/nn/benchmark/utils/semantic_segmentation/evaluator.py,sha256=iPHRo1L
 supervisely/nn/benchmark/utils/semantic_segmentation/loader.py,sha256=_5ZZ7Nkd8WWYJnKwc1Dx3bEPS_1R84gG_hQc0w0TXWw,1957
 supervisely/nn/benchmark/utils/semantic_segmentation/utils.py,sha256=X5NiR02R-0To2_SuSGHZZccl_-Bupg5F9d7nziIMRMc,3874
 supervisely/nn/benchmark/visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/nn/benchmark/visualization/evaluation_result.py,sha256=733HJL4rJa5XqCJydW9vSyaepvpHzym9wQsw1wFEgeI,10251
+supervisely/nn/benchmark/visualization/evaluation_result.py,sha256=QNAvxy1-W15cyHZbeAysiDWvXAgdgQZnRAlhL_Uzlxs,10159
 supervisely/nn/benchmark/visualization/renderer.py,sha256=j6dFqIGa9M0DbVVR-jgJab-MzUUiw47coHiiBF_H_jQ,3923
 supervisely/nn/benchmark/visualization/report_template.html,sha256=tylBK5Bb2cqKACK1GZUKyIjPS9yHQFHAS-QeEEwhqTE,2172
-supervisely/nn/benchmark/visualization/vis_click_data.py,sha256=hBeVepHngTGVHK3MiWe8qZY87taifxnoUXq22W2xaqo,3724
+supervisely/nn/benchmark/visualization/vis_click_data.py,sha256=4YE0nJJrzuBEbGChzkivblC-pAwEeXsKU55sXVZQyt0,3716
 supervisely/nn/benchmark/visualization/widgets/__init__.py,sha256=UovmhwLH4Au81JFrFz0NwPasaIqPEI-zXN-JntTc2FU,949
 supervisely/nn/benchmark/visualization/widgets/widget.py,sha256=1cpfyaZjug8ZQIRR5SB4BgkSxP3BMwkYFRWH36U1eF0,818
 supervisely/nn/benchmark/visualization/widgets/chart/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1071,9 +1070,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.294.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.294.dist-info/METADATA,sha256=tgjdB_FSwcOSyWj9_Mxkeije_FDbelZVyzWcy4vGTDo,33573
-supervisely-6.73.294.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-supervisely-6.73.294.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.294.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.294.dist-info/RECORD,,
+supervisely-6.73.296.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.296.dist-info/METADATA,sha256=jw6IjeK21DXTFEgbISalLDxHn7BWvWAQkZ3rbhAMJ5g,33573
+supervisely-6.73.296.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+supervisely-6.73.296.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.296.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.296.dist-info/RECORD,,