supervisely 6.73.286__py3-none-any.whl → 6.73.287__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


supervisely/nn/benchmark/base_benchmark.py
@@ -620,6 +620,9 @@ class BaseBenchmark:
     def get_eval_result(self):
         if self._eval_results is None:
             self._eval_results = self.evaluator.get_eval_result()
+        if not self._eval_results.inference_info:
+            self._eval_results.inference_info["gt_project_id"] = self.gt_project_info.id
+            self._eval_results.inference_info["dt_project_id"] = self.dt_project_info.id
         return self._eval_results
 
     def get_diff_project_info(self):
supervisely/nn/benchmark/base_evaluator.py
@@ -16,7 +16,7 @@ class BaseEvalResult:
 
     def __init__(self, directory: Optional[str] = None):
         self.directory = directory
-        self.inference_info: Dict = None
+        self.inference_info: Dict = {}
         self.speedtest_info: Dict = None
         self.eval_data: Dict = None
         self.mp = None
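
Taken together, the two hunks above close a small crash path: `inference_info` now starts as an empty dict instead of `None`, so `.get()` lookups and item assignment work before any inference metadata is attached, and the `if not self._eval_results.inference_info` guard in `get_eval_result()` still fires for the empty dict and backfills the project ids. A minimal sketch of the relevant Python semantics (synthetic values, not Supervisely code):

    # `not {}` is True, just like `not None`, so the new guard still triggers,
    # but unlike None an empty dict supports .get() and item assignment.
    inference_info = {}                        # new default in BaseEvalResult
    if not inference_info:                     # True for an empty dict
        inference_info["gt_project_id"] = 111  # hypothetical project ids
        inference_info["dt_project_id"] = 222
    print(inference_info.get("model_name") or "Custom")  # safe lookup -> "Custom"
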
supervisely/nn/benchmark/instance_segmentation/text_templates.py
@@ -14,7 +14,7 @@ definitions = SimpleNamespace(
 )
 
 docs_url = (
-    "https://docs.supervisely.com/neural-networks/model-evaluation-benchmark/instant-segmentation"
+    "https://docs.supervisely.com/neural-networks/model-evaluation-benchmark/instance-segmentation"
 )
 
 # <i class="zmdi zmdi-check-circle" style="color: #13ce66; margin-right: 5px"></i>
@@ -44,6 +44,7 @@ markdown_overview = """
 {}
 - **IoU threshold**: {}
 - **Optimal confidence threshold**: {} (calculated automatically), <a href="{}" target="_blank">learn more</a>.
+- **Averaging across IoU thresholds:** {}, <a href="{}" target="_blank">learn more</a>.
 
 Learn more about Model Benchmark, implementation details, and how to use the charts in our <a href="{}" target="_blank">Technical Report</a>.
 """
@@ -60,7 +61,7 @@ Here, we comprehensively assess the model's performance by presenting a broad se
 - **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
 """
 
-markdown_AP_custom_description = """> * AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""
+markdown_AP_custom_description = """> *AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""
 
 markdown_iou_per_class = """### IoU Threshold per Class
 
supervisely/nn/benchmark/object_detection/metric_provider.py
@@ -90,13 +90,16 @@ class MetricProvider:
         self.iouThrs = params["iouThrs"]
         self.recThrs = params["recThrs"]
 
+        # Evaluation params
         eval_params = params.get("evaluation_params", {})
         self.iou_threshold = eval_params.get("iou_threshold", 0.5)
         self.iou_threshold_idx = np.where(np.isclose(self.iouThrs, self.iou_threshold))[0][0]
-
-        # IoU per class (optional)
         self.iou_threshold_per_class = eval_params.get("iou_threshold_per_class")
         self.iou_idx_per_class = params.get("iou_idx_per_class")  # {cat id: iou_idx}
+        if self.iou_threshold_per_class is not None:
+            # TODO: temporary solution
+            eval_params["average_across_iou_thresholds"] = False
+        self.average_across_iou_thresholds = eval_params.get("average_across_iou_thresholds", True)
 
     def calculate(self):
         self.m_full = _MetricProvider(
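
The new `average_across_iou_thresholds` flag defaults to True (the historical behavior of averaging TP/FP/FN over all COCO IoU thresholds) but is force-disabled whenever per-class IoU thresholds are configured, per the `TODO: temporary solution` above. A toy sketch of that precedence, assuming a plain dict of evaluation params:

    # Hypothetical evaluation params; only the precedence logic mirrors the hunk.
    eval_params = {"iou_threshold_per_class": {"car": 0.7, "person": 0.5}}

    if eval_params.get("iou_threshold_per_class") is not None:
        # mirrors the temporary override in MetricProvider.__init__
        eval_params["average_across_iou_thresholds"] = False
    average = eval_params.get("average_across_iou_thresholds", True)
    print(average)  # False: per-class thresholds disable averaging
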
@@ -199,9 +202,10 @@ class MetricProvider:
         }
 
     def AP_per_class(self):
-        s = self.coco_precision[:, :, :, 0, 2]
+        s = self.coco_precision[:, :, :, 0, 2].copy()
         s[s == -1] = np.nan
         ap = np.nanmean(s, axis=(0, 1))
+        ap = np.nan_to_num(ap, nan=0)
         return ap
 
     def AP_custom_per_class(self):
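
This hunk fixes two separate defects. `self.coco_precision[:, :, :, 0, 2]` is basic NumPy indexing and therefore returns a view, so the in-place `s[s == -1] = np.nan` was silently writing NaNs back into the cached precision array; `.copy()` breaks that aliasing. And `np.nan_to_num(ap, nan=0)` maps classes whose precision column is all NaN (the COCO `-1` placeholder for classes with no ground truth) to an AP of 0 rather than NaN. A small synthetic demonstration, not Supervisely code:

    import numpy as np

    # Synthetic stand-in for coco_precision; class at index 1 has no GT (-1).
    coco_precision = np.full((2, 3, 4, 1, 3), 0.5)
    coco_precision[:, :, 1, 0, 2] = -1

    s = coco_precision[:, :, :, 0, 2]       # basic indexing -> a view
    s[s == -1] = np.nan                     # mutates the source array too!
    print(np.isnan(coco_precision).any())   # True: the cache was corrupted

    coco_precision[:, :, 1, 0, 2] = -1      # restore, then do it the fixed way
    s = coco_precision[:, :, :, 0, 2].copy()
    s[s == -1] = np.nan                     # (nanmean warns on the all-NaN class;
    ap = np.nan_to_num(np.nanmean(s, axis=(0, 1)), nan=0)  # nan_to_num zeroes it)
    print(ap)                               # [0.5 0.  0.5 0.5]
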
@@ -212,6 +216,7 @@ class MetricProvider:
             s[:, cat_id - 1] = self.coco_precision[iou_idx, :, cat_id - 1, 0, 2]
         s[s == -1] = np.nan
         ap = np.nanmean(s, axis=0)
+        ap = np.nan_to_num(ap, nan=0)
         return ap
 
     def AP_custom(self):
@@ -284,6 +289,14 @@ class _MetricProvider:
         self.fp_not_confused_matches = [m for m in self.fp_matches if not m["miss_cls"]]
         self.ious = np.array([m["iou"] for m in self.tp_matches])
 
+        # Evaluation params
+        self.params = params
+        self.iou_idx_per_class = np.array(
+            [params["iou_idx_per_class"][cat_id] for cat_id in self.cat_ids]
+        )[:, None]
+        eval_params = params.get("evaluation_params", {})
+        self.average_across_iou_thresholds = eval_params.get("average_across_iou_thresholds", True)
+
     def _init_counts(self):
         cat_ids = self.cat_ids
         iouThrs = self.iouThrs
@@ -328,14 +341,22 @@ class _MetricProvider:
         self.true_positives = true_positives
         self.false_negatives = false_negatives
         self.false_positives = false_positives
-        self.TP_count = int(self.true_positives[:, 0].sum(0))
-        self.FP_count = int(self.false_positives[:, 0].sum(0))
-        self.FN_count = int(self.false_negatives[:, 0].sum(0))
+        self.TP_count = int(self._take_iou_thresholds(true_positives).sum())
+        self.FP_count = int(self._take_iou_thresholds(false_positives).sum())
+        self.FN_count = int(self._take_iou_thresholds(false_negatives).sum())
 
+    def _take_iou_thresholds(self, x):
+        return np.take_along_axis(x, self.iou_idx_per_class, axis=1)
+
     def base_metrics(self):
-        tp = self.true_positives
-        fp = self.false_positives
-        fn = self.false_negatives
+        if self.average_across_iou_thresholds:
+            tp = self.true_positives
+            fp = self.false_positives
+            fn = self.false_negatives
+        else:
+            tp = self._take_iou_thresholds(self.true_positives)
+            fp = self._take_iou_thresholds(self.false_positives)
+            fn = self._take_iou_thresholds(self.false_negatives)
         confuse_count = len(self.confused_matches)
 
         mAP = self.coco_mAP
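
The new `_take_iou_thresholds` helper is the core of the non-averaging mode: `self.iou_idx_per_class` is shaped `(num_classes, 1)` (note the `[:, None]` in the previous hunk), so `np.take_along_axis(x, ..., axis=1)` plucks, for each class row, the count at that class's own IoU-threshold column. A worked sketch with made-up counts:

    import numpy as np

    # Rows = classes, columns = IoU thresholds (0.50, 0.55, 0.60); synthetic.
    true_positives = np.array([
        [10, 8, 5],   # class 0
        [20, 15, 9],  # class 1
    ])
    iou_idx_per_class = np.array([0, 2])[:, None]  # class 0 -> 0.50, class 1 -> 0.60

    tp = np.take_along_axis(true_positives, iou_idx_per_class, axis=1)
    print(tp.flatten())   # [10  9]: each class counted at its own threshold
    print(int(tp.sum()))  # 19, which is what the new TP_count computes
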
@@ -358,9 +379,14 @@
         }
 
     def per_class_metrics(self):
-        tp = self.true_positives.mean(1)
-        fp = self.false_positives.mean(1)
-        fn = self.false_negatives.mean(1)
+        if self.average_across_iou_thresholds:
+            tp = self.true_positives.mean(1)
+            fp = self.false_positives.mean(1)
+            fn = self.false_negatives.mean(1)
+        else:
+            tp = self._take_iou_thresholds(self.true_positives).flatten()
+            fp = self._take_iou_thresholds(self.false_positives).flatten()
+            fn = self._take_iou_thresholds(self.false_negatives).flatten()
         pr = tp / (tp + fp)
         rc = tp / (tp + fn)
         f1 = 2 * pr * rc / (pr + rc)
supervisely/nn/benchmark/object_detection/text_templates.py
@@ -44,6 +44,7 @@ markdown_overview = """
 {}
 - **IoU threshold**: {}
 - **Optimal confidence threshold**: {} (calculated automatically), <a href="{}" target="_blank">learn more</a>.
+- **Averaging across IoU thresholds:** {}, <a href="{}" target="_blank">learn more</a>.
 
 Learn more about Model Benchmark, implementation details, and how to use the charts in our <a href="{}" target="_blank">Technical Report</a>.
 """
@@ -65,7 +66,7 @@ Here, we comprehensively assess the model's performance by presenting a broad se
 - **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
 """
 
-markdown_AP_custom_description = """> * AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""
+markdown_AP_custom_description = """> *AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""
 
 markdown_iou_per_class = """### IoU Threshold per Class
 
supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py
@@ -72,13 +72,13 @@ class ConfidenceScore(DetectionVisMetric):
             self.eval_result.dfsp_down,
             x="scores",
             y=["precision", "recall", "f1"],
-            labels={"value": "Value", "variable": "Metric", "scores": "Confidence Score"},
+            labels={"value": "Metric", "variable": "Metric", "scores": "Confidence Score"},
             width=None,
             height=500,
             color_discrete_map=color_map,
         )
         fig.update_traces(
-            hovertemplate="Confidence Score: %{x:.2f}<br>Value: %{y:.2f}<extra></extra>"
+            hovertemplate="Confidence score: %{x:.2f}<br>Metric: %{y:.2f}<extra></extra>"
         )
         fig.update_layout(yaxis=dict(range=[0, 1]), xaxis=dict(range=[0, 1], tick0=0, dtick=0.1))
 
supervisely/nn/benchmark/object_detection/vis_metrics/f1_score_at_different_iou.py
@@ -39,7 +39,7 @@ class F1ScoreAtDifferentIOU(DetectionVisMetric):
             np.concatenate([self.eval_result.dfsp_down["scores"].values[:, None], f1s_down.T], 1),
             columns=["scores"] + iou_names,
         )
-        labels = {"value": "Value", "variable": "IoU threshold", "scores": "Confidence Score"}
+        labels = {"value": "F1-score", "variable": "IoU threshold", "scores": "Confidence Score"}
 
         fig = px.line(
             df,
@@ -51,19 +51,19 @@ class F1ScoreAtDifferentIOU(DetectionVisMetric):
             height=500,
         )
         fig.update_traces(
-            hovertemplate="Confidence Score: %{x:.2f}<br>Value: %{y:.2f}<extra></extra>"
+            hovertemplate="Confidence Score: %{x:.2f}<br>F1-score: %{y:.2f}<extra></extra>"
         )
         fig.update_layout(yaxis=dict(range=[0, 1]), xaxis=dict(range=[0, 1], tick0=0, dtick=0.1))
 
         # add annotations for maximum F1-Score for each IoU threshold
         for i, iou in enumerate(iou_names):
-            argmax_f1 = f1s[i].argmax()
+            argmax_f1 = np.nanargmax(f1s[i])
             max_f1 = f1s[i][argmax_f1]
             score = self.eval_result.mp.m_full.score_profile["scores"][argmax_f1]
             fig.add_annotation(
                 x=score,
                 y=max_f1,
-                text=f"Best score: {score:.2f}",
+                text=f"Best conf: {score:.2f}",
                 showarrow=True,
                 arrowhead=1,
                 arrowcolor="black",
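
`np.nanargmax` replaces `.argmax()` here because an F1 profile can contain NaN wherever precision and recall are both zero (a 0/0 division), and NumPy's plain `argmax` returns the index of the first NaN, so the annotation could land on a gap instead of the true best confidence. A minimal illustration:

    import numpy as np

    f1_profile = np.array([0.2, np.nan, 0.9, 0.7])  # NaN where pr + rc == 0

    print(f1_profile.argmax())       # 1: plain argmax picks the NaN slot
    print(np.nanargmax(f1_profile))  # 2: the real F1 peak, as annotated now
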
supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py
@@ -86,11 +86,10 @@ class PerClassOutcomeCounts(DetectionVisMetric):
         import plotly.express as px  # pylint: disable=import-error
 
         # Per-class Counts
-        iou_thres = 0
-
-        tp = self.eval_result.mp.true_positives[:, iou_thres]
-        fp = self.eval_result.mp.false_positives[:, iou_thres]
-        fn = self.eval_result.mp.false_negatives[:, iou_thres]
+        mp = self.eval_result.mp
+        tp = mp.m._take_iou_thresholds(mp.true_positives).flatten()
+        fp = mp.m._take_iou_thresholds(mp.false_positives).flatten()
+        fn = mp.m._take_iou_thresholds(mp.false_negatives).flatten()
 
         # normalize
         support = tp + fn
supervisely/nn/benchmark/object_detection/vis_metrics/overview.py
@@ -20,7 +20,7 @@ class Overview(DetectionVisMetric):
         url = self.eval_result.inference_info.get("checkpoint_url")
         link_text = self.eval_result.inference_info.get("custom_checkpoint_path")
         if link_text is None:
-            link_text = url
+            link_text = url or ""
         link_text = link_text.replace("_", "\_")
 
         model_name = self.eval_result.inference_info.get("model_name") or "Custom"
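
`url or ""` protects the following `link_text.replace(...)` from an `inference_info` that carries neither a `custom_checkpoint_path` nor a `checkpoint_url`; previously `link_text` could be `None` and the `.replace` call raised `AttributeError`. A minimal sketch with an empty info dict:

    # Empty info dict: neither key is present, both .get() calls return None.
    inference_info = {}

    url = inference_info.get("checkpoint_url")
    link_text = inference_info.get("custom_checkpoint_path")
    if link_text is None:
        link_text = url or ""                  # was `link_text = url`, i.e. None
    link_text = link_text.replace("_", "\\_")  # no AttributeError anymore
    print(repr(link_text))                     # ''
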
@@ -31,6 +31,7 @@ class Overview(DetectionVisMetric):
 
         # link to scroll to the optimal confidence section
         opt_conf_url = self.vis_texts.docs_url + "#f1-optimal-confidence-threshold"
+        average_url = self.vis_texts.docs_url + "#averaging-iou-thresholds"
 
         iou_threshold = self.eval_result.mp.iou_threshold
         if self.eval_result.different_iou_thresholds_per_class:
@@ -52,6 +53,8 @@ class Overview(DetectionVisMetric):
             iou_threshold,
             round(self.eval_result.mp.f1_optimal_conf, 4),
             opt_conf_url,
+            self.eval_result.mp.average_across_iou_thresholds,
+            average_url,
             self.vis_texts.docs_url,
         ]
 
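
These two entries slot in right after `opt_conf_url` so the argument list stays aligned with the two `{}` placeholders added to `markdown_overview` in both text_templates files. A toy sketch of how `str.format` consumes the list positionally (simplified stand-in template, not the real one):

    # Simplified stand-in for markdown_overview; placeholder order must match.
    template = (
        "- **Optimal confidence threshold**: {}, <a href=\"{}\">learn more</a>.\n"
        "- **Averaging across IoU thresholds:** {}, <a href=\"{}\">learn more</a>."
    )
    args = [0.4213, "docs#f1-optimal-confidence-threshold",
            True, "docs#averaging-iou-thresholds"]  # the new pair slots in here
    print(template.format(*args))
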
supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py
@@ -32,8 +32,7 @@ class PerClassAvgPrecision(DetectionVisMetric):
         import plotly.express as px  # pylint: disable=import-error
 
         # AP per-class
-        ap_per_class = self.eval_result.mp.coco_precision[:, :, :, 0, 2].mean(axis=(0, 1))
-        ap_per_class[ap_per_class == -1] = 0  # -1 is a placeholder for no GT
+        ap_per_class = self.eval_result.mp.AP_per_class()
         labels = dict(r="Average Precision", theta="Class")
         fig = px.scatter_polar(
             r=ap_per_class,
supervisely-6.73.287.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.286
+Version: 6.73.287
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely
supervisely-6.73.287.dist-info/RECORD
@@ -744,8 +744,8 @@ supervisely/nn/artifacts/utils.py,sha256=C4EaMi95MAwtK5TOnhK4sQ1BWvgwYBxXyRStkhY
 supervisely/nn/artifacts/yolov5.py,sha256=slh05EpQsxqgKwB9KMClshdBxPBN3ZWZ6S4B80ECEt4,1724
 supervisely/nn/artifacts/yolov8.py,sha256=sFd9kU7Gdowq6WH1S3NdlQeoL9jjQKmRYb51fG_wbDk,1446
 supervisely/nn/benchmark/__init__.py,sha256=7jDezvavJFtO9mDeB2TqW8N4sD8TsHQBPpA9RESleIQ,610
-supervisely/nn/benchmark/base_benchmark.py,sha256=Xnb0jL0voBPC-s_eVYSYbYv-xVfLYtQf1tHLnJ9ktq8,25713
-supervisely/nn/benchmark/base_evaluator.py,sha256=sc8gNn3myGA8sGnP6EIiTp24JPXUQ9Ou-8BmTf-Dt7w,5248
+supervisely/nn/benchmark/base_benchmark.py,sha256=2buF7mD58igPMBPiEAJqfWHRO-JdPPzzOVlbR7-nvoA,25956
+supervisely/nn/benchmark/base_evaluator.py,sha256=MJeZnMcWr_cbeJ2r0GJ4SWgjWX5w33Y3pYVR6kCIQMQ,5246
 supervisely/nn/benchmark/base_visualizer.py,sha256=7woiYmztDzYZlbhL1hTfJnIi26RFi4obF2VLA519uxQ,10092
 supervisely/nn/benchmark/cv_tasks.py,sha256=ShoAbuNzfMYj0Se-KOnl_-dJnrmvN6Aukxa0eq28bFw,239
 supervisely/nn/benchmark/comparison/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -779,33 +779,33 @@ supervisely/nn/benchmark/instance_segmentation/__init__.py,sha256=47DEQpj8HBSa-_
 supervisely/nn/benchmark/instance_segmentation/benchmark.py,sha256=lTDzgKGpfeF5o_a2nS56wiAsUQPH1eubk37b9CaB2KI,1171
 supervisely/nn/benchmark/instance_segmentation/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
 supervisely/nn/benchmark/instance_segmentation/evaluator.py,sha256=mpCi8S6YNwlVvgcERQSHBOhC9PrSfQkQ55pPTcK6V9c,2811
-supervisely/nn/benchmark/instance_segmentation/text_templates.py,sha256=_ZIU_3-xlUGKTcbEthxB4Ngt12azdC7pxpgqHHw7M3I,25780
+supervisely/nn/benchmark/instance_segmentation/text_templates.py,sha256=l2I1PbenuBzcCPu_h2J5JE5gQsJCr6lNnoIzk5BEuwc,25868
 supervisely/nn/benchmark/instance_segmentation/visualizer.py,sha256=8NscOKy7JK4AG-Czu3SM0qJQXLDfKD9URdG1d4nz89E,564
 supervisely/nn/benchmark/object_detection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/object_detection/base_vis_metric.py,sha256=XjUnFCnCMYLrpjojIOwiRNaSsSLYpozTHWfwLkaCd5U,1612
 supervisely/nn/benchmark/object_detection/benchmark.py,sha256=Wb4xlFXilIMVfsifNNQY25uE52NeEDLzQpnq8QPYq9U,1086
 supervisely/nn/benchmark/object_detection/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
 supervisely/nn/benchmark/object_detection/evaluator.py,sha256=EOQQbmwQqjjvbRu3tY24SRA7K8nyqshR92gUcP1lcrY,7371
-supervisely/nn/benchmark/object_detection/metric_provider.py,sha256=MLVRnSwMQ9lfrlgBt4ThIHTVKY-6zuuEWK5-yVsmaj0,21140
-supervisely/nn/benchmark/object_detection/text_templates.py,sha256=J5xUPCGY-QWxc5AEt_u9_2r5q0LBlIzsa007H0GgoeU,26026
+supervisely/nn/benchmark/object_detection/metric_provider.py,sha256=iV79hlyB-_wj-X25-JPjoXfwQWNxGrMVweha3JZA46M,22557
+supervisely/nn/benchmark/object_detection/text_templates.py,sha256=ZjkcP91dj98_1xqxKSy5TGrU08puXaLhpjNC-c_41A0,26113
 supervisely/nn/benchmark/object_detection/visualizer.py,sha256=NpLKVW5fo6N0kYzgLsfY66wvCv38G3k-SNm4HImXt6g,32366
 supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py,sha256=AXCLHEySEdR-B-5sfDoWBmmOLBVlyW2U_xr8Ta42sQI,2096
 supervisely/nn/benchmark/object_detection/vis_metrics/confidence_distribution.py,sha256=OlwkPgzEQ-RegcLZHVUVOL0n6I_2iayPVpAIie4y2O8,3615
-supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py,sha256=r_saaZI4WB7C7ykNb1obmf8kEOkphLA4pInDoS6dXXU,4005
+supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py,sha256=kuOhQDNwAsBbtjuMU_7GajVzu6j6n3xDJTv_hNKX6o8,4007
 supervisely/nn/benchmark/object_detection/vis_metrics/confusion_matrix.py,sha256=2PJUt0-njRpzN7XBGjkSt9kkh5tDPuv_Sne-2v8DWHc,3731
 supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py,sha256=wIYfq3izM2XNJHr56h3j5XhuU8W8Y3wO_RKAwxntQs4,4855
-supervisely/nn/benchmark/object_detection/vis_metrics/f1_score_at_different_iou.py,sha256=6y2Kx-R_t4SdJkdWNyZQ6TGjCC-u6KhXb4cCno4GuTk,2882
+supervisely/nn/benchmark/object_detection/vis_metrics/f1_score_at_different_iou.py,sha256=Aewzu2QhxZoPT_k0QJt_G11B8M2DXLCGRjE0MlVYNko,2892
 supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py,sha256=7rObk7WNsfwK7xBWl3aOxcn0uD48njEc04fQIPHc3_4,4678
 supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py,sha256=lv4Bk8W4X8ZhvQKyMXI46d240PNlMFx1hdji_aoTS50,3601
 supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py,sha256=byucJuHYWSXIZU8U1Dc44QDpG3lTlhoNdUfD1b-uriw,4721
 supervisely/nn/benchmark/object_detection/vis_metrics/model_predictions.py,sha256=gsGDsesiwOcqeFvHr33b4PSJNw6MoA5brO-qRydRtsA,5944
 supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py,sha256=HuTgisYmXCSUeF5WOahy-uaCdvRLsNzg28BDrZ-5hww,7161
-supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py,sha256=GBq0KlPka5z4cxHcKCe2eVOI_h3qlWUqGCyhYs6mjrk,6825
-supervisely/nn/benchmark/object_detection/vis_metrics/overview.py,sha256=M6E--Yd1ztP4VBjR6VDUVrj2hgs5mwJF-vhWIjgVGkw,6376
+supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py,sha256=BKsb1XGVsg6-aOI5G6NibxvdD5lVzkMjHisI8T85Sns,6853
+supervisely/nn/benchmark/object_detection/vis_metrics/overview.py,sha256=uBxHIz8t0ujS5MI-LASg1RrPx3YqeKKc7dWz3MozHIU,6546
 supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py,sha256=EeZmyNlTVQLQ-0wIDGdvFmRkahJBBiOKSmWiAJ8Bfks,3478
 supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py,sha256=Bl_buVvH8SVqwsc4DcHnojMOqpwTnRgXFt9yw_Y1BR0,1607
 supervisely/nn/benchmark/object_detection/vis_metrics/precision.py,sha256=cAgMrp13uulHfM8xnPDZyR6PqS8nck1Fo7YPpvHPCbw,2708
-supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py,sha256=X-hyf7OP7hzQzI5Yb2yDU536hxYQLvxcnVyYa3x27XA,2076
+supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py,sha256=IetoRTE9xZHHMZlPwUmQyTnaD7cQpnIQ3zJmUUr6fgY,1965
 supervisely/nn/benchmark/object_detection/vis_metrics/recall.py,sha256=AAxg3eJDjWIZEarOMZNcdIcYpVDFFDlbc5bwF4X3GIo,2579
 supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py,sha256=u-00HH54XzlhFuzc7U-mk2-IhUSEPYzv23BIIz8STvk,1984
 supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py,sha256=_8ie3dPfwOyNCg-YhqO5jFW6kubCfQC2Obn9QSZFVeQ,3615
@@ -1071,9 +1071,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.286.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.286.dist-info/METADATA,sha256=ulxDJ50Pdv9rflRKXgqgB8fpw1pOMrd4xzC1uDdIseU,33573
-supervisely-6.73.286.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-supervisely-6.73.286.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.286.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.286.dist-info/RECORD,,
+supervisely-6.73.287.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.287.dist-info/METADATA,sha256=9oifX7yDdPoCm5GaVVjDyURQrPfVIjGIT27n3C10lf8,33573
+supervisely-6.73.287.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+supervisely-6.73.287.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.287.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.287.dist-info/RECORD,,