supervisely-6.73.206-py3-none-any.whl → supervisely-6.73.207-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


supervisely/nn/benchmark/base_benchmark.py

```diff
@@ -1,4 +1,5 @@
 import os
+import yaml
 from typing import Callable, List, Optional, Tuple, Union
 
 import numpy as np
@@ -31,6 +32,7 @@ class BaseBenchmark:
         progress: Optional[SlyTqdm] = None,
         progress_secondary: Optional[SlyTqdm] = None,
         classes_whitelist: Optional[List[str]] = None,
+        evaluation_params: Optional[dict] = None,
     ):
         self.api = api
         self.session: SessionJSON = None
@@ -51,6 +53,8 @@ class BaseBenchmark:
         self.vis_texts = None
         self.inference_speed_text = None
         self.train_info = None
+        self.evaluation_params = evaluation_params
+        self._validate_evaluation_params()
 
     def _get_evaluator_class(self) -> type:
         raise NotImplementedError()
@@ -152,6 +156,7 @@ class BaseBenchmark:
             progress=self.pbar,
             items_count=self.dt_project_info.items_count,
             classes_whitelist=self.classes_whitelist,
+            evaluation_params=self.evaluation_params,
         )
         self.evaluator.evaluate()
@@ -552,3 +557,7 @@ class BaseBenchmark:
                 chagned = True
         if chagned:
             self.api.project.update_meta(pred_project_id, pred_meta.to_json())
+
+    def _validate_evaluation_params(self):
+        if self.evaluation_params:
+            self._get_evaluator_class().validate_evaluation_params(self.evaluation_params)
```
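Net effect of these hunks: `BaseBenchmark` now accepts an `evaluation_params` dict, validates it against the evaluator class up front, and forwards it when the evaluator is constructed. A minimal caller-side sketch, assuming `ObjectDetectionBenchmark` (present in this package) forwards its constructor arguments to `BaseBenchmark`; the project ID and every keyword other than `evaluation_params` are illustrative:

```python
import supervisely as sly
from supervisely.nn.benchmark import ObjectDetectionBenchmark

api = sly.Api.from_env()

# Override the default IoU threshold (0.5) shipped in evaluation_params.yaml.
# A value off the COCO grid would be rejected by validate_evaluation_params().
bench = ObjectDetectionBenchmark(
    api,
    gt_project_id=123,  # illustrative
    evaluation_params={"iou_threshold": 0.75},
)
```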
supervisely/nn/benchmark/evaluation/base_evaluator.py

```diff
@@ -1,12 +1,15 @@
 import os
 import pickle
-from typing import List, Optional
+from typing import List, Optional, Union
+
+import yaml
 
 from supervisely.app.widgets import SlyTqdm
 from supervisely.task.progress import tqdm_sly
 
 
 class BaseEvaluator:
+    EVALUATION_PARAMS_YAML_PATH: str = None
 
     def __init__(
         self,
@@ -16,6 +19,7 @@ class BaseEvaluator:
         progress: Optional[SlyTqdm] = None,
         items_count: Optional[int] = None,  # TODO: is it needed?
         classes_whitelist: Optional[List[str]] = None,
+        evaluation_params: Optional[dict] = None,
     ):
         self.gt_project_path = gt_project_path
         self.dt_project_path = dt_project_path
@@ -25,12 +29,36 @@ class BaseEvaluator:
         os.makedirs(result_dir, exist_ok=True)
         self.classes_whitelist = classes_whitelist
 
+        if evaluation_params is None:
+            evaluation_params = self._get_default_evaluation_params()
+        self.evaluation_params = evaluation_params
+        if self.evaluation_params:
+            self.validate_evaluation_params(self.evaluation_params)
+
     def evaluate(self):
         raise NotImplementedError()
 
     def get_result_dir(self) -> str:
         return self.result_dir
 
+    @classmethod
+    def load_yaml_evaluation_params(cls) -> Union[str, None]:
+        if cls.EVALUATION_PARAMS_YAML_PATH is None:
+            return None
+        with open(cls.EVALUATION_PARAMS_YAML_PATH, "r") as f:
+            return f.read()
+
+    @classmethod
+    def validate_evaluation_params(cls, evaluation_params: dict) -> None:
+        pass
+
+    @classmethod
+    def _get_default_evaluation_params(cls) -> dict:
+        if cls.EVALUATION_PARAMS_YAML_PATH is None:
+            return {}
+        else:
+            return yaml.safe_load(cls.load_yaml_evaluation_params())
+
     def _dump_pickle(self, data, file_path):
         with open(file_path, "wb") as f:
             pickle.dump(data, f)
```
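Taken together, `BaseEvaluator` now resolves parameters in one place: explicit `evaluation_params` win, otherwise defaults are read from `EVALUATION_PARAMS_YAML_PATH` (or fall back to `{}` when it is unset), and non-empty params are validated. A sketch of a custom subclass under those rules; `my_params.yaml` and the validation logic are hypothetical:

```python
from pathlib import Path

from supervisely.nn.benchmark.evaluation import BaseEvaluator


class MyEvaluator(BaseEvaluator):
    # Defaults come from this file via _get_default_evaluation_params();
    # leaving it None makes the defaults an empty dict.
    EVALUATION_PARAMS_YAML_PATH = f"{Path(__file__).parent}/my_params.yaml"  # hypothetical

    @classmethod
    def validate_evaluation_params(cls, evaluation_params: dict) -> None:
        # Hypothetical check: reject keys this evaluator does not understand.
        unknown = set(evaluation_params) - {"iou_threshold"}
        assert not unknown, f"unknown evaluation params: {unknown}"

    def evaluate(self):
        iou = self.evaluation_params.get("iou_threshold", 0.5)
        ...  # run the actual evaluation with `iou`
```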
supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py

```diff
@@ -1,10 +1,39 @@
 from collections import defaultdict
-from typing import Callable, Optional, Literal
+from typing import Callable, List, Literal, Optional
 
 import numpy as np
 
-
-def calculate_metrics(cocoGt, cocoDt, iouType: Literal["bbox", "segm"], progress_cb: Optional[Callable] = None):
+# pylint: disable=import-error
+from pycocotools.cocoeval import COCOeval
+
+
+def set_cocoeval_params(
+    cocoeval: COCOeval,
+    parameters: dict,
+):
+    if parameters is None:
+        return
+    param_names = (
+        "iouThrs",
+        "recThrs",
+        "maxDets",
+        "areaRng",
+        "areaRngLbl",
+        # "kpt_oks_sigmas"  # For keypoints
+    )
+    for param_name in param_names:
+        cocoeval.params.__setattr__(
+            param_name, parameters.get(param_name, cocoeval.params.__getattribute__(param_name))
+        )
+
+
+def calculate_metrics(
+    cocoGt,
+    cocoDt,
+    iouType: Literal["bbox", "segm"],
+    progress_cb: Optional[Callable] = None,
+    evaluation_params: Optional[dict] = None,
+):
     """
     Calculate COCO metrics.
 
@@ -19,35 +48,42 @@ def calculate_metrics(cocoGt, cocoDt, iouType: Literal["bbox", "segm"], progress
     :return: Results of the evaluation
     :rtype: dict
     """
-    from pycocotools.cocoeval import COCOeval  # pylint: disable=import-error
 
-    progress_cb(1) if progress_cb is not None else None
     cocoEval = COCOeval(cocoGt, cocoDt, iouType=iouType)
-    progress_cb(1) if progress_cb is not None else None
     cocoEval.evaluate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval.accumulate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval.summarize()
-    progress_cb(1) if progress_cb is not None else None
 
     # For classification metrics
     cocoEval_cls = COCOeval(cocoGt, cocoDt, iouType=iouType)
-    progress_cb(1) if progress_cb is not None else None
     cocoEval_cls.params.useCats = 0
     cocoEval_cls.evaluate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval_cls.accumulate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval_cls.summarize()
-    progress_cb(1) if progress_cb is not None else None
+
+    iou_t = 0
+    is_custom_iou_threshold = (
+        evaluation_params is not None and evaluation_params.get("iou_threshold") and evaluation_params["iou_threshold"] != 0.5
+    )
+    if is_custom_iou_threshold:
+        iou_t = np.where(cocoEval.params.iouThrs == evaluation_params["iou_threshold"])[0][0]
 
     eval_img_dict = get_eval_img_dict(cocoEval)
     eval_img_dict_cls = get_eval_img_dict(cocoEval_cls)
-    matches = get_matches(eval_img_dict, eval_img_dict_cls, cocoEval_cls, iou_t=0)
+    matches = get_matches(eval_img_dict, eval_img_dict_cls, cocoEval_cls, iou_t=iou_t)
 
-    params = {"iouThrs": cocoEval.params.iouThrs, "recThrs": cocoEval.params.recThrs}
+    params = {
+        "iouThrs": cocoEval.params.iouThrs,
+        "recThrs": cocoEval.params.recThrs,
+        "evaluation_params": evaluation_params or {},
+    }
     coco_metrics = {"mAP": cocoEval.stats[0], "precision": cocoEval.eval["precision"]}
+    coco_metrics["AP50"] = cocoEval.stats[1]
+    coco_metrics["AP75"] = cocoEval.stats[2]
     eval_data = {
         "matches": matches,
         "coco_metrics": coco_metrics,
```
supervisely/nn/benchmark/evaluation/coco/evaluation_params.yaml (new file)

```diff
@@ -0,0 +1,2 @@
+# Intersection over Union threshold that will be used for objects mathcing
+iou_threshold: 0.5
```
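This two-line YAML is the entire default configuration; `BaseEvaluator._get_default_evaluation_params()` turns it into a dict. A minimal sketch of that round trip, assuming `ObjectDetectionEvaluator` is exported from `supervisely.nn.benchmark.evaluation` alongside `BaseEvaluator`:

```python
import yaml

from supervisely.nn.benchmark.evaluation import ObjectDetectionEvaluator

# load_yaml_evaluation_params() returns the raw YAML text, or None when
# EVALUATION_PARAMS_YAML_PATH is unset on the class.
raw = ObjectDetectionEvaluator.load_yaml_evaluation_params()
print(yaml.safe_load(raw))  # {'iou_threshold': 0.5}
```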
supervisely/nn/benchmark/evaluation/coco/metric_provider.py

```diff
@@ -89,6 +89,8 @@ class MetricProvider:
         self.coco_precision = coco_metrics["precision"]
         self.iouThrs = params["iouThrs"]
         self.recThrs = params["recThrs"]
+        self.iou_threshold = params["evaluation_params"]["iou_threshold"]
+        self.iou_threshold_idx = np.searchsorted(self.iouThrs, self.iou_threshold)
 
     def calculate(self):
         self.m_full = _MetricProvider(
@@ -134,7 +136,64 @@ class MetricProvider:
         self._scores_tp_and_fp = self.m_full.scores_tp_and_fp()
         self._maximum_calibration_error = self.m_full.maximum_calibration_error()
         self._expected_calibration_error = self.m_full.expected_calibration_error()
+
+    def json_metrics(self):
+        base = self.base_metrics()
+        iou_name = int(self.iou_threshold * 100)
+        ap_by_class = self.AP_per_class().tolist()
+        ap_by_class = dict(zip(self.cat_names, ap_by_class))
+        ap_custom_by_class = self.AP_custom_per_class().tolist()
+        ap_custom_by_class = dict(zip(self.cat_names, ap_custom_by_class))
+        return {
+            "mAP": base["mAP"],
+            "AP50": self.coco_metrics["AP50"],
+            "AP75": self.coco_metrics["AP75"],
+            f"AP{iou_name}": self.AP_custom(),
+            "f1": base["f1"],
+            "precision": base["precision"],
+            "recall": base["recall"],
+            "iou": base["iou"],
+            "classification_accuracy": base["classification_accuracy"],
+            "calibration_score": base["calibration_score"],
+            "f1_optimal_conf": self.f1_optimal_conf,
+            "expected_calibration_error": self.expected_calibration_error(),
+            "maximum_calibration_error": self.maximum_calibration_error(),
+            "AP_by_class": ap_by_class,
+            f"AP{iou_name}_by_class": ap_custom_by_class,
+        }
+
+    def metric_table(self):
+        table = self.json_metrics()
+        iou_name = int(self.iou_threshold * 100)
+        return {
+            "mAP": table["mAP"],
+            "AP50": table["AP50"],
+            "AP75": table["AP75"],
+            f"AP{iou_name}": table[f"AP{iou_name}"],
+            "f1": table["f1"],
+            "precision": table["precision"],
+            "recall": table["recall"],
+            "Avg. IoU": table["iou"],
+            "Classification Acc.": table["classification_accuracy"],
+            "Calibration Score": table["calibration_score"],
+            "optimal confidence threshold": table["f1_optimal_conf"],
+        }
 
+    def AP_per_class(self):
+        s = self.coco_precision[:, :, :, 0, 2]
+        s[s == -1] = np.nan
+        ap = np.nanmean(s, axis=(0, 1))
+        return ap
+
+    def AP_custom_per_class(self):
+        s = self.coco_precision[self.iou_threshold_idx, :, :, 0, 2]
+        s[s == -1] = np.nan
+        ap = np.nanmean(s, axis=0)
+        return ap
+
+    def AP_custom(self):
+        return np.nanmean(self.AP_custom_per_class())
+
     def base_metrics(self):
         base = self._base_metrics
         calibration_score = 1 - self._expected_calibration_error
```
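`AP_custom_per_class()` reads one slice of COCO's five-dimensional precision array: the requested IoU threshold, all recall points, every class, area range "all", and the top maxDets setting. A synthetic sketch of that reduction (array shapes follow pycocotools; the random data stands in for `coco_precision`):

```python
import numpy as np

# COCO precision array: [T IoU thresholds, R recall points, K classes,
# A area ranges, M maxDets settings].
T, R, K = 10, 101, 3
precision = np.random.rand(T, R, K, 4, 3)  # stands in for coco_precision
iou_thrs = np.linspace(0.5, 0.95, T)       # 0.50, 0.55, ..., 0.95

idx = np.searchsorted(iou_thrs, 0.75)      # -> 5, like iou_threshold_idx
s = precision[idx, :, :, 0, 2]             # area "all", highest maxDets column
s[s == -1] = np.nan                        # pycocotools marks empty cells with -1
ap_per_class = np.nanmean(s, axis=0)       # average over recall -> per-class AP
print(np.nanmean(ap_per_class))            # scalar AP at IoU=0.75 (AP_custom analogue)
```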
supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py

```diff
@@ -4,9 +4,12 @@ from supervisely.io.json import dump_json_file
 from supervisely.nn.benchmark.coco_utils import read_coco_datasets, sly2coco
 from supervisely.nn.benchmark.evaluation import BaseEvaluator
 from supervisely.nn.benchmark.evaluation.coco import calculate_metrics
+from pathlib import Path
 
 
 class InstanceSegmentationEvaluator(BaseEvaluator):
+    EVALUATION_PARAMS_YAML_PATH = f"{Path(__file__).parent}/coco/evaluation_params.yaml"
+
     def evaluate(self):
         try:
             self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
@@ -19,12 +22,25 @@ class InstanceSegmentationEvaluator(BaseEvaluator):
 
         self._dump_datasets()
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
-        with self.pbar(message="Evaluation: Calculating metrics", total=10) as p:
+        with self.pbar(message="Evaluation: Calculating metrics", total=5) as p:
             self.eval_data = calculate_metrics(
-                self.cocoGt, self.cocoDt, iouType="segm", progress_cb=p.update
+                self.cocoGt,
+                self.cocoDt,
+                iouType="segm",
+                progress_cb=p.update,
+                evaluation_params=self.evaluation_params,
             )
         self._dump_eval_results()
 
+    @classmethod
+    def validate_evaluation_params(cls, evaluation_params: dict) -> None:
+        iou_threshold = evaluation_params.get("iou_threshold")
+        if iou_threshold is not None:
+            assert iou_threshold in [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], (
+                f"iou_threshold must be one of [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], "
+                f"but got {iou_threshold}"
+            )
+
     def _convert_to_coco(self):
         cocoGt_json = sly2coco(
             self.gt_project_path,
```
supervisely/nn/benchmark/evaluation/object_detection_evaluator.py

```diff
@@ -4,9 +4,12 @@ from supervisely.io.json import dump_json_file
 from supervisely.nn.benchmark.coco_utils import read_coco_datasets, sly2coco
 from supervisely.nn.benchmark.evaluation import BaseEvaluator
 from supervisely.nn.benchmark.evaluation.coco import calculate_metrics
+from pathlib import Path
 
 
 class ObjectDetectionEvaluator(BaseEvaluator):
+    EVALUATION_PARAMS_YAML_PATH = f"{Path(__file__).parent}/coco/evaluation_params.yaml"
+
     def evaluate(self):
         try:
             self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
@@ -17,12 +20,25 @@ class ObjectDetectionEvaluator(BaseEvaluator):
                 "try to use newer version of NN app."
             )
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
-        with self.pbar(message="Evaluation: Calculating metrics", total=10) as p:
+        with self.pbar(message="Evaluation: Calculating metrics", total=5) as p:
             self.eval_data = calculate_metrics(
-                self.cocoGt, self.cocoDt, iouType="bbox", progress_cb=p.update
+                self.cocoGt,
+                self.cocoDt,
+                iouType="bbox",
+                progress_cb=p.update,
+                evaluation_params=self.evaluation_params,
             )
         self._dump_eval_results()
 
+    @classmethod
+    def validate_evaluation_params(cls, evaluation_params: dict) -> None:
+        iou_threshold = evaluation_params.get("iou_threshold")
+        if iou_threshold is not None:
+            assert iou_threshold in [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], (
+                f"iou_threshold must be one of [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], "
+                f"but got {iou_threshold}"
+            )
+
     def _convert_to_coco(self):
         cocoGt_json = sly2coco(
             self.gt_project_path,
```
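Both evaluators pin `iou_threshold` to COCO's ten-point IoU grid, since `calculate_metrics()` locates the threshold by exact equality against `iouThrs`. The classmethod can be exercised standalone (again assuming the evaluator is exported from `supervisely.nn.benchmark.evaluation`):

```python
from supervisely.nn.benchmark.evaluation import ObjectDetectionEvaluator

# Accepted: 0.75 lies on the 0.50-0.95 grid.
ObjectDetectionEvaluator.validate_evaluation_params({"iou_threshold": 0.75})

# Rejected with AssertionError: 0.42 is off the grid and could never be
# matched against cocoEval.params.iouThrs.
ObjectDetectionEvaluator.validate_evaluation_params({"iou_threshold": 0.42})
```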
supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py

```diff
@@ -62,7 +62,7 @@ class SpeedtestOverview(MetricVis):
         res["columns"] = columns
         res["columnsOptions"] = columns_options
 
-        widget.main_column = columns[0]
+        widget.main_column = "Batch size"
         widget.fixed_columns = 1
         widget.show_header_controls = False
         return res
```
supervisely/nn/benchmark/visualization/vis_metric_base.py

```diff
@@ -216,6 +216,7 @@ class MetricVis:
                 "mainColumn": widget.main_column,
                 "fixColumns": widget.fixed_columns,
                 "showHeaderControls": widget.show_header_controls,
+                "width": f"width:{widget.width}" if widget.width else "",
             }
         )
 
```
supervisely/nn/benchmark/visualization/vis_metrics/overview.py

```diff
@@ -15,12 +15,13 @@ class Overview(MetricVis):
     def __init__(self, loader: Visualizer) -> None:
         super().__init__(loader)
         self._is_overview = True
-        info = loader.inference_info
+        info = loader.inference_info or {}
        url = info.get("checkpoint_url")
         link_text = info.get("custom_checkpoint_path")
         if link_text is None:
             link_text = url
-        link_text = link_text.replace("_", "\_")
+        if link_text is not None:
+            link_text = link_text.replace("_", "\_")
 
         # Note about validation dataset
         classes_str, note_about_val_dataset, train_session = self.get_overview_info()
@@ -65,6 +66,7 @@
                 self._loader.vis_texts.definitions.confidence_score,
             ],
         ),
+        table_key_metrics=Widget.Table(),
         chart=Widget.Chart(),
     )
 
@@ -117,7 +119,7 @@
         return fig
 
     def get_overview_info(self):
-        classes_cnt = len(self._loader._benchmark.classes_whitelist)
+        classes_cnt = len(self._loader._benchmark.classes_whitelist or [])
         classes_str = "classes" if classes_cnt > 1 else "class"
         classes_str = f"{classes_cnt} {classes_str}"
 
@@ -158,3 +160,30 @@
             images_str += f". Evaluated on the whole project ({val_imgs_cnt} images)"
 
         return classes_str, images_str, train_session
+
+    def get_table(self, widget: Widget.Table) -> dict:
+        res = {}
+
+        columns = ["metrics", "values"]
+        res["content"] = []
+        for metric, value in self._loader.mp.metric_table().items():
+            row = [metric, round(value, 2)]
+            dct = {
+                "row": row,
+                "id": metric,
+                "items": row,
+            }
+            res["content"].append(dct)
+
+        columns_options = [
+            {"customCell": True, "disableSort": True},
+            {"disableSort": True},
+        ]
+
+        res["columns"] = columns
+        res["columnsOptions"] = columns_options
+
+        widget.main_column = columns[0]
+        widget.show_header_controls = False
+        widget.width = "60%"
+        return res
```
supervisely/nn/benchmark/visualization/vis_templates.py

```diff
@@ -177,7 +177,7 @@ template_gallery_str = """<sly-iw-gallery
 
 
 template_table_str = """
-    <div style="margin-top: 20px; margin-bottom: 30px;">
+    <div style="margin-top: 20px; margin-bottom: 30px; {{ width }}">
       <sly-iw-table
         iw-widget-id="{{ widget_id }}"
         {% if clickable %}
@@ -208,11 +208,17 @@ template_table_str = """
             slot-scope="{ row, column, cellValue }"
           >
             <div
-              v-if="column === '{{ mainColumn }}'"
+              v-if="column === ' ' && '{{ mainColumn }}' === 'Batch size'"
               class="fflex"
             >
               <b>Batch size {{ '{{ cellValue }}' }}</b>
             </div>
+            <div
+              v-if="column === '{{ mainColumn }}'"
+              class="fflex"
+            >
+              <b>{{ '{{ cellValue }}' }}</b>
+            </div>
           </span>
         </sly-iw-table>
     </div>
```
supervisely/nn/benchmark/visualization/vis_widgets.py

```diff
@@ -103,6 +103,7 @@ class Widget:
             self.main_column = ""
             self.fixed_columns = 0
             self.show_header_controls = True
+            self.width = None
             super().__init__()
 
     class Gallery(BaseWidget):
```
supervisely/nn/benchmark/visualization/visualizer.py

```diff
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import os
 import json
 import pickle
 from typing import TYPE_CHECKING, Dict, List, Tuple
@@ -155,6 +156,7 @@ class Visualizer:
             cocoDt,
         )
         self.mp.calculate()
+        self._dump_key_metrics()
 
         self.df_score_profile = pd.DataFrame(
             self.mp.confidence_score_profile(), columns=["scores", "precision", "recall", "f1"]
@@ -187,6 +189,8 @@
 
         initialized = [mv(self) for mv in ALL_METRICS]
         if self.speedtest is not None:
+            if len(self.speedtest["speedtest"]) < 2:
+                SPEEDTEST_METRICS.pop()
             initialized = initialized + [mv(self) for mv in SPEEDTEST_METRICS]
         initialized = [mv for mv in initialized if self.cv_task.value in mv.cv_tasks]
         with self.pbar(
@@ -351,6 +355,13 @@
             json.dump(self._generate_state(metric_visualizations), f)
         logger.info("Saved: %r", "state.json")
 
+    def _dump_key_metrics(self):
+        key_metrics = self.mp.json_metrics()
+        path = os.path.join(self._benchmark.get_base_dir(), "evaluation", "key_metrics.json")
+        with open(path, "w", encoding="utf-8") as f:
+            json.dump(key_metrics, f)
+        return path
+
     def update_diff_annotations(self):
         meta = self._update_pred_meta_with_tags(self.dt_project_info.id, self.dt_project_meta)
         self._update_diff_meta(meta)
```
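With `_dump_key_metrics()` wired into `calculate()`, every benchmark run now leaves a machine-readable summary on disk next to the other evaluation artifacts. A sketch of consuming it; the base directory is illustrative:

```python
import json
import os

base_dir = "/path/to/benchmark/output"  # illustrative; whatever get_base_dir() returned
with open(os.path.join(base_dir, "evaluation", "key_metrics.json"), encoding="utf-8") as f:
    key_metrics = json.load(f)
print(key_metrics["mAP"], key_metrics.get("AP75"))
```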
supervisely-6.73.207.dist-info/METADATA

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.206
+Version: 6.73.207
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely
```
supervisely-6.73.207.dist-info/RECORD

```diff
@@ -721,7 +721,7 @@ supervisely/nn/artifacts/unet.py,sha256=Gn8ADfwC4F-MABVDPRY7g_ZaAIaaOAEbhhIGII-o
 supervisely/nn/artifacts/yolov5.py,sha256=6KDCyDlLO7AT9of1qHjCaG5mmxCv6C0p-zCk9KJ0PH4,1478
 supervisely/nn/artifacts/yolov8.py,sha256=c3MzbOTYD6RT5N4F9oZ0SWXxyonjJ6ZQfZLYUHPRZg4,1204
 supervisely/nn/benchmark/__init__.py,sha256=RxqbBx7cbzookq2DRvxYIaRofON9uxHeY5h8DqDbZq0,187
-supervisely/nn/benchmark/base_benchmark.py,sha256=f0TlgPdtl5-hWe38k1q4Jhld48kiDIhhWmGGJoO-FGA,22366
+supervisely/nn/benchmark/base_benchmark.py,sha256=LoDsT_F86Y9xztrTyfz74FmT619_rrZVUKnEtTzav0A,22755
 supervisely/nn/benchmark/cv_tasks.py,sha256=ShoAbuNzfMYj0Se-KOnl_-dJnrmvN6Aukxa0eq28bFw,239
 supervisely/nn/benchmark/instance_segmentation_benchmark.py,sha256=9iiWEH7KDw7ps0mQQdzIrCtCKg4umHekF3ws7jIGjmE,938
 supervisely/nn/benchmark/object_detection_benchmark.py,sha256=s1S-L952etgz-UsDPyg69AgmFfAoJXvFHhITT8zB5iw,956
@@ -730,23 +730,24 @@ supervisely/nn/benchmark/coco_utils/__init__.py,sha256=MKxuzzBWpRCwR8kOb5NXUK8vD
 supervisely/nn/benchmark/coco_utils/sly2coco.py,sha256=iudlcHNynthscH-V5qwCLk6VgIcxYrMEuAfGIjrOjZ0,6867
 supervisely/nn/benchmark/coco_utils/utils.py,sha256=J9kM_Cn4XxfsrSQ8Rx6eb1UsS65-wOybaCkI9rQDeiU,504
 supervisely/nn/benchmark/evaluation/__init__.py,sha256=1NGV_xEGe9lyPdE5gJ8AASKzm2WyZ_jKlh9WVvCQIaY,287
-supervisely/nn/benchmark/evaluation/base_evaluator.py,sha256=Ac1EsvRrMH-Fck1aVS9T2Tx1m9PfialRA3z8XJs5e8U,1039
-supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py,sha256=oskpLBSwo_u224m_fc-oVJD0GGWgsyR9HrBVEEQ-FAE,3101
-supervisely/nn/benchmark/evaluation/object_detection_evaluator.py,sha256=5XrTnNpgdZgJ-LgXdUd74OvZLTQEenltxTSO12bTwqg,2943
+supervisely/nn/benchmark/evaluation/base_evaluator.py,sha256=htei2QGHsx-1DO16j-lUjflmUFpfSIv0drhGfe60qAU,1995
+supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py,sha256=p2VC4WXB2RQlmeRt14u74QTXSOwhkyQPCznw7Kqe32k,3773
+supervisely/nn/benchmark/evaluation/object_detection_evaluator.py,sha256=0Sp0h9kNQUQYVRL3WrV9Vm7OniJPMYriEFbwcCejOBs,3615
 supervisely/nn/benchmark/evaluation/coco/__init__.py,sha256=l6dFxp9aenywosQzQkIaDEI1p-DDQ63OgJJXxSVB4Mk,172
-supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py,sha256=tgUAXngl0QcGpSdGvZRVo6f_0YP_PF4Leu2fpx5a_Us,10702
-supervisely/nn/benchmark/evaluation/coco/metric_provider.py,sha256=j4YMk20t3lsX3QnsSIRjEYx8EayHw77I4KdXxKfgxeI,17513
+supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py,sha256=Pe5_bXJ57343PQ0TuYEkCMNUyp-YTyIXnPXUESuXQBk,11430
+supervisely/nn/benchmark/evaluation/coco/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
+supervisely/nn/benchmark/evaluation/coco/metric_provider.py,sha256=sEHY9mob6fjKG4_deMxu3lVGTNXhqbRDoFerBEwH5a0,19907
 supervisely/nn/benchmark/evaluation/coco/metrics.py,sha256=oyictdJ7rRDUkaVvHoxntywW5zZweS8pIJ1bN6JgXtE,2420
 supervisely/nn/benchmark/visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/vis_click_data.py,sha256=4QdBZqJmmPYdcB7x565zOtXhDFRyXIB4tpu0V-_otoc,3724
-supervisely/nn/benchmark/visualization/vis_metric_base.py,sha256=NJBduyKE0UPihPtM2BR2eOdDwkUWZ3lMBpZGy9UFOZ0,13859
-supervisely/nn/benchmark/visualization/vis_templates.py,sha256=tDPQcuByvnDdfGdDaT-KhemnKCtieunp-MgnGAPsbrQ,9905
-supervisely/nn/benchmark/visualization/vis_widgets.py,sha256=CsT7DSfxH4g4zHsmm_7RCJf3YR6zXiADuYhUIIGdn7w,4073
-supervisely/nn/benchmark/visualization/visualizer.py,sha256=BLu31ETO202AgpT1gjiAry-m_hk3ExzyFUsQtOHaeqU,31729
+supervisely/nn/benchmark/visualization/vis_metric_base.py,sha256=hXnbGAijnZ700GuzfvaHBxU5elQR0wXkBUNbmcSWCno,13941
+supervisely/nn/benchmark/visualization/vis_templates.py,sha256=Vy019K8G8oJ9vN35tvsSjYA21xdldqIP-BALGEuy_eM,10169
+supervisely/nn/benchmark/visualization/vis_widgets.py,sha256=oavMM2Z-05Hp_Fj086NgXAqDq2KPAqXfT-nJb5qlDsg,4103
+supervisely/nn/benchmark/visualization/visualizer.py,sha256=tmx1coNpKv9dDmJb-lHhdjuiSq0oRnFC0GvSHwcXV4E,32149
 supervisely/nn/benchmark/visualization/inference_speed/__init__.py,sha256=6Nahwt9R61_Jc1eWupXa70CgyRQ7tbUeiDWR26017rY,554
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py,sha256=73gbXs1uTfxxWH-UCJdR72m-48jMD5qVyMyolf5jNoc,6140
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py,sha256=ivUVriKyhx9ZtwVSqrAkUqq1SJGYYxNLwLQR1UgE4aM,900
-supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py,sha256=w-2F2lN89C2amHzPeI02WXvtjN6rdAMlRIgPLTR1f-U,4885
+supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py,sha256=QLgiiHJzmasnNmn6OGWfLef01gLOiM84uVBK5P8c954,4887
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py,sha256=bVpNS3YBP0TGsqE_XQBuFMJI5ybDM0RZpEzFyT7cbkA,2157
 supervisely/nn/benchmark/visualization/text_templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py,sha256=XGeBrbP-ROyKYbqYZzA281_IG45Ygu9NKyqG2I3o5TU,1124
@@ -765,7 +766,7 @@ supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py,sha256=3
 supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py,sha256=rsm_hdE0pYCHY-5v0pjDIid71y2tPbzYbmH2Qw-RS-4,3983
 supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py,sha256=lSb2-jfplyERIUCi8_6P9aq6C77JGOKOJK20J824sEE,5623
 supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py,sha256=YHfueea2EkUgNGP4FCyKyCaCtCwaYeYNJ3WwfF-Hzi4,3553
-supervisely/nn/benchmark/visualization/vis_metrics/overview.py,sha256=_TwrBnkEfw4zCn2Vr1BrOQf1ixsKYtN5aQY7seHBS8M,6141
+supervisely/nn/benchmark/visualization/vis_metrics/overview.py,sha256=V-uNrtNhR5idywyfFSNOA4zFesTf5d6i2g8MNtLOhIw,6997
 supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py,sha256=mm8IVM90EoIC_9GsiM-Jyhh6jPqQcHMo788VAvRAzMY,1877
 supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py,sha256=4-AwEQk1ywuW4zXO_EXo7_aFMjenwhnLlGX2PWqiu0k,3574
 supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py,sha256=9Uuibo38HVGPChPbCW8i3cMYdb6-NFlys1TBisp5zOU,1442
@@ -956,9 +957,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.206.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.206.dist-info/METADATA,sha256=MQ9ZNLjLwIGBKpZiYf2S-F148rr3-bWJgg55ndrpleo,33077
-supervisely-6.73.206.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-supervisely-6.73.206.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.206.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.206.dist-info/RECORD,,
+supervisely-6.73.207.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.207.dist-info/METADATA,sha256=jPDiqTxPbkSOwBCqz4V6xiMiBRiQ-n5fB6ZxFAaBYVw,33077
+supervisely-6.73.207.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+supervisely-6.73.207.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.207.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.207.dist-info/RECORD,,
```