supervisely-6.73.282-py3-none-any.whl → supervisely-6.73.284-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (32)
  1. supervisely/convert/base_converter.py +1 -0
  2. supervisely/convert/image/cityscapes/cityscapes_converter.py +8 -0
  3. supervisely/convert/image/coco/coco_anntotation_converter.py +8 -0
  4. supervisely/convert/image/coco/coco_converter.py +6 -1
  5. supervisely/convert/image/coco/coco_helper.py +1 -1
  6. supervisely/convert/image/image_converter.py +14 -14
  7. supervisely/convert/image/multi_view/multi_view.py +17 -2
  8. supervisely/convert/image/yolo/yolo_converter.py +7 -1
  9. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +12 -18
  10. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +5 -5
  11. supervisely/nn/benchmark/base_benchmark.py +13 -2
  12. supervisely/nn/benchmark/base_evaluator.py +2 -0
  13. supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py +5 -0
  14. supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py +25 -0
  15. supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py +9 -3
  16. supervisely/nn/benchmark/instance_segmentation/evaluator.py +1 -0
  17. supervisely/nn/benchmark/instance_segmentation/text_templates.py +7 -0
  18. supervisely/nn/benchmark/object_detection/evaluator.py +15 -3
  19. supervisely/nn/benchmark/object_detection/metric_provider.py +21 -1
  20. supervisely/nn/benchmark/object_detection/text_templates.py +7 -0
  21. supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py +12 -0
  22. supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +41 -2
  23. supervisely/nn/benchmark/object_detection/visualizer.py +20 -0
  24. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +1 -0
  25. supervisely/nn/benchmark/utils/detection/calculate_metrics.py +31 -33
  26. supervisely/nn/benchmark/visualization/renderer.py +2 -0
  27. {supervisely-6.73.282.dist-info → supervisely-6.73.284.dist-info}/METADATA +1 -1
  28. {supervisely-6.73.282.dist-info → supervisely-6.73.284.dist-info}/RECORD +32 -32
  29. {supervisely-6.73.282.dist-info → supervisely-6.73.284.dist-info}/LICENSE +0 -0
  30. {supervisely-6.73.282.dist-info → supervisely-6.73.284.dist-info}/WHEEL +0 -0
  31. {supervisely-6.73.282.dist-info → supervisely-6.73.284.dist-info}/entry_points.txt +0 -0
  32. {supervisely-6.73.282.dist-info → supervisely-6.73.284.dist-info}/top_level.txt +0 -0
supervisely/convert/base_converter.py
@@ -168,6 +168,7 @@ class BaseConverter:
         self._upload_as_links: bool = upload_as_links
         self._remote_files_map: Optional[Dict[str, str]] = remote_files_map
         self._supports_links = False  # if converter supports uploading by links
+        self._force_shape_for_links = False
         self._api = Api.from_env() if self._upload_as_links else None
         self._team_id = team_id() if self._upload_as_links else None
         self._converter = None
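The new `_force_shape_for_links` flag defaults to off in the base class; converters that can upload by remote link switch it on in their constructors, as the Cityscapes, COCO, YOLO, and multi-view hunks below do. A minimal sketch of the opt-in pattern, with a hypothetical converter name:

    # Hypothetical subclass illustrating the opt-in used by this release's converters.
    class MyLinkAwareConverter(ImageConverter):
        def __init__(self, input_data, labeling_interface, upload_as_links, remote_files_map=None):
            super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)
            self._supports_links = True                         # converter can upload by URL
            self._force_shape_for_links = self.upload_as_links  # ask the server for image shapes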
supervisely/convert/image/cityscapes/cityscapes_converter.py
@@ -34,6 +34,8 @@ class CityscapesConverter(ImageConverter):
         super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)

         self._classes_mapping = {}
+        self._supports_links = True
+        self._force_shape_for_links = self.upload_as_links

     def __str__(self):
         return AvailableImageConverters.CITYSCAPES
@@ -41,6 +43,10 @@ class CityscapesConverter(ImageConverter):
     @property
     def key_file_ext(self) -> str:
         return ".json"
+
+    @property
+    def ann_ext(self) -> str:
+        return ".json"

     def ann_file_ext(self) -> str:
         return ".json"
@@ -108,6 +114,8 @@ class CityscapesConverter(ImageConverter):
         return False

     def validate_format(self) -> bool:
+        if self.upload_as_links:
+            self._download_remote_ann_files()
         detected_ann_cnt = 0
         images_list, ann_dict = [], {}
         for root, _, files in os.walk(self._input_data):
supervisely/convert/image/coco/coco_anntotation_converter.py
@@ -37,6 +37,7 @@ class FastCOCOConverter(COCOConverter, ImageConverter):
        self._items = []
        meta = ProjectMeta()
        warnings = defaultdict(list)
+       item_names = set()
        for ann_path in ann_paths:
            try:
                with coco_helper.HiddenCocoPrints():
@@ -74,11 +75,18 @@ class FastCOCOConverter(COCOConverter, ImageConverter):
                coco_ann = coco_anns[image_id]
                if len(coco_ann) == 0 or coco_ann is None or image_name is None:
                    continue
+               if image_name in item_names:
+                   # * Block to handle the case when there are mixed annotations: caption and segmentations for the same images
+                   item = next(item for item in self._items if item.name == image_name)
+                   if item.shape == (height, width):
+                       item.ann_data.extend(coco_ann)
+                   continue
                item = self.Item(image_name) if image_url is None else self.Item(image_url)
                item.name = image_name
                item.ann_data = coco_ann
                item.set_shape((height, width))
                self._items.append(item)
+               item_names.add(image_name)
                detected_ann_cnt += len(coco_ann)

        self._meta = meta
supervisely/convert/image/coco/coco_converter.py
@@ -25,6 +25,8 @@ class COCOConverter(ImageConverter):
         super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)

         self._coco_categories = []
+        self._supports_links = True
+        self._force_shape_for_links = self.upload_as_links

     def __str__(self) -> str:
         return AvailableImageConverters.COCO
@@ -56,6 +58,8 @@ class COCOConverter(ImageConverter):
     def validate_format(self) -> bool:
         from pycocotools.coco import COCO  # pylint: disable=import-error

+        if self.upload_as_links:
+            self._download_remote_ann_files()
         detected_ann_cnt = 0
         images_list, ann_paths = [], []
         for root, _, files in os.walk(self._input_data):
@@ -145,7 +149,8 @@ class COCOConverter(ImageConverter):
        if item.ann_data is None:
            return Annotation.from_img_path(item.path)
        else:
-           item.set_shape()
+           if not self.upload_as_links:
+               item.set_shape()
            ann = coco_helper.create_supervisely_annotation(
                item,
                meta,
supervisely/convert/image/coco/coco_helper.py
@@ -182,7 +182,7 @@ def convert_rle_mask_to_polygon(coco_ann):
     return Bitmap(mask).to_contours()


-def convert_polygon_vertices(coco_ann, image_size):
+def convert_polygon_vertices(coco_ann, image_size: Tuple[int, int]):
    polygons = coco_ann["segmentation"]
    if all(type(coord) is float for coord in polygons):
        polygons = [polygons]
supervisely/convert/image/image_converter.py
@@ -136,20 +136,16 @@ class ImageConverter(BaseConverter):
        item_names = []
        item_paths = []
        item_metas = []
-       anns = []
        for item in batch:
            item.path = self.validate_image(item.path)
            if item.path is None:
                continue  # image has failed validation
-           item.name = f"{get_file_name(item.path)}{get_file_ext(item.path).lower()}"
-           if self.upload_as_links and not self.supports_links:
-               ann = None
-           else:
-               ann = self.to_supervisely(item, meta, renamed_classes, renamed_tags)
-           name = generate_free_name(
-               existing_names, item.name, with_ext=True, extend_used_names=True
+           name = f"{get_file_name(item.path)}{get_file_ext(item.path).lower()}"
+
+           item.name = generate_free_name(
+               existing_names, name, with_ext=True, extend_used_names=True
            )
-           item_names.append(name)
+           item_names.append(item.name)
            item_paths.append(item.path)

            if isinstance(item.meta, str):  # path to file
@@ -159,9 +155,6 @@ class ImageConverter(BaseConverter):
            else:
                item_metas.append({})

-           if ann is not None:
-               anns.append(ann)
-
        with ApiContext(
            api=api, project_id=project_id, dataset_id=dataset_id, project_meta=meta
        ):
@@ -173,7 +166,7 @@ class ImageConverter(BaseConverter):
                    metas=item_metas,
                    batch_size=batch_size,
                    conflict_resolution="rename",
-                   force_metadata_for_links=False,
+                   force_metadata_for_links=self._force_shape_for_links,
                )
            else:
                img_infos = api.image.upload_paths(
@@ -183,8 +176,15 @@ class ImageConverter(BaseConverter):
                    metas=item_metas,
                    conflict_resolution="rename",
                )
-
            img_ids = [img_info.id for img_info in img_infos]
+
+           anns = []
+           if not (self.upload_as_links and not self.supports_links):
+               for info, item in zip(img_infos, batch):
+                   if self._force_shape_for_links:
+                       item.set_shape((info.height, info.width))
+                   anns.append(self.to_supervisely(item, meta, renamed_classes, renamed_tags))
+
            if len(anns) == len(img_ids):
                api.annotation.upload_anns(
                    img_ids, anns, skip_bounds_validation=self.upload_as_links
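Note the reordering: annotations are now built after the upload returns, so for link uploads the item shape can come from the server-reported `ImageInfo` instead of local pixels. Passing `force_metadata_for_links=self._force_shape_for_links` is what makes that work; with `False` the server skips reading the remote files and `info.height`/`info.width` stay empty. A condensed sketch of the new flow (argument names are an assumption, see the hunks above for the real calls):

    # Condensed sketch, assuming img_infos aligns one-to-one with batch.
    img_infos = api.image.upload_links(
        dataset_id, names=item_names, links=item_paths,
        force_metadata_for_links=self._force_shape_for_links,  # True -> server fills height/width
    )
    anns = []
    for info, item in zip(img_infos, batch):
        if self._force_shape_for_links:
            item.set_shape((info.height, info.width))  # shape from server, no local read needed
        anns.append(self.to_supervisely(item, meta, renamed_classes, renamed_tags))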
supervisely/convert/image/multi_view/multi_view.py
@@ -1,6 +1,6 @@
 import os
 from collections import defaultdict
-from typing import Dict, Union
+from typing import Dict, Union, Optional

 from supervisely import ProjectMeta, generate_free_name, is_development, logger
 from supervisely.api.api import Api, ApiContext
@@ -13,6 +13,18 @@ from supervisely.project.project_settings import LabelingInterface

 class MultiViewImageConverter(ImageConverter):

+    def __init__(
+        self,
+        input_data: str,
+        labeling_interface: Optional[Union[LabelingInterface, str]],
+        upload_as_links: bool,
+        remote_files_map: Optional[Dict[str, str]] = None,
+    ):
+        super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)
+
+        self._supports_links = True
+        self._force_shape_for_links = self.upload_as_links
+
     def __str__(self):
         return AvailableImageConverters.MULTI_VIEW

@@ -79,13 +91,16 @@ class MultiViewImageConverter(ImageConverter):
                    logger.warn(f"Image '{name}' already exists. Renamed to '{new_name}'.")
                    os.rename(image, os.path.join(group_path, new_name))
                    image = os.path.join(group_path, new_name)
+               if self._upload_as_links:
+                   image = self.remote_files_map.get(image, image)
                images.append(image)

            with ApiContext(
                api=api, project_id=project_id, dataset_id=dataset_id, project_meta=meta
            ):
+               kwarg = {"links": images} if self.upload_as_links else {"paths": images}
                api.image.upload_multiview_images(
-                   dataset.id, group_name, images, progress_cb=progress_cb
+                   dataset.id, group_name, **kwarg, progress_cb=progress_cb
                )

            if log_progress:
supervisely/convert/image/yolo/yolo_converter.py
@@ -38,6 +38,8 @@ class YOLOConverter(ImageConverter):
         self._coco_classes_dict: dict = {}
         self._num_kpts = None
         self._num_dims = None
+        self._supports_links = True
+        self._force_shape_for_links = self.upload_as_links

     def __str__(self) -> str:
         return AvailableImageConverters.YOLO
@@ -151,6 +153,9 @@ class YOLOConverter(ImageConverter):
         return False

     def validate_format(self) -> bool:
+        if self.upload_as_links:
+            self._download_remote_ann_files()
+
         detected_ann_cnt = 0
         config_path = None
         images_list, ann_dict = [], {}
@@ -238,7 +243,8 @@ class YOLOConverter(ImageConverter):

        try:
            labels = []
-           item.set_shape()
+           if not self.upload_as_links:
+               item.set_shape()
            height, width = item.shape
            with open(item.ann_data, "r") as ann_file:
                lines = ann_file.readlines()
supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py
@@ -61,35 +61,27 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
            return False

        def filter_fn(path):
-           return all(
-               [
-                   (Path(path) / name).exists()
-                   for name in ["maps", "samples", "sweeps", "v1.0-mini"]
-               ]
-           )
+           return all([(Path(path) / name).exists() for name in ["maps", "samples"]])

-       try:
-           input_path = [d for d in fs.dirs_filter(self._input_data, filter_fn)].pop()
-       except IndexError:
+       input_path = next((d for d in fs.dirs_filter(self._input_data, filter_fn)), None)
+       if input_path is None:
            return False

        sample_dir = input_path + "/samples/"
        if any([not fs.dir_exists(f"{sample_dir}/{d}") for d in helpers.DIR_NAMES]):
            return False

-       sweeps_dir = input_path + "/sweeps/"
-       if any([not fs.dir_exists(f"{sweeps_dir}/{d}") for d in helpers.DIR_NAMES]):
-           return False
-
-       ann_dir = input_path + "/v1.0-mini/"
-       if any([not fs.file_exists(f"{ann_dir}/{d}.json") for d in helpers.TABLE_NAMES]):
+       fil_fn = lambda p: all(fs.file_exists(f"{p}/{name}.json") for name in helpers.TABLE_NAMES)
+       ann_dir = next((d for d in fs.dirs_filter(input_path, fil_fn)), None)
+       if ann_dir is None:
            return False

+       version = osp.basename(ann_dir)
        try:
            t = TinyTimer()
-           nuscenes = NuScenes(dataroot=input_path, verbose=False)
+           nuscenes = NuScenes(version=version, dataroot=input_path, verbose=False)
            self._nuscenes: NuScenes = nuscenes
-           logger.info(f"NuScenes initialization took {t.get_sec():.3f} sec")
+           logger.debug(f"NuScenes initialization took {t.get_sec():.3f} sec")
        except Exception as e:
            logger.debug(f"Failed to initialize NuScenes: {e}")
            return False
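Two behavioral changes here: the converter no longer requires a `sweeps/` directory or the hard-coded `v1.0-mini/` table directory, and the dataset version is now inferred from whichever directory actually holds the table JSONs. That lets other devkit splits load as well; roughly, assuming the standard nuscenes-devkit API:

    from nuscenes.nuscenes import NuScenes

    # version is derived from the annotation dir name, e.g. "v1.0-trainval" or "v1.0-mini"
    nusc = NuScenes(version="v1.0-trainval", dataroot="/data/nuscenes", verbose=False)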
@@ -184,7 +176,9 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
            scene_name_to_dataset[scene_names[0]] = dataset_info

        if log_progress:
-           progress, progress_cb = self.get_progress(total_sample_cnt, "Converting episode scenes...")
+           progress, progress_cb = self.get_progress(
+               total_sample_cnt, "Converting episode scenes..."
+           )
        else:
            progress_cb = None

supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py
@@ -16,11 +16,11 @@ DIR_NAMES = [
     "CAM_FRONT_LEFT",
     "CAM_FRONT_RIGHT",
     "LIDAR_TOP",
-    "RADAR_FRONT",
-    "RADAR_FRONT_LEFT",
-    "RADAR_FRONT_RIGHT",
-    "RADAR_BACK_LEFT",
-    "RADAR_BACK_RIGHT",
+    # "RADAR_FRONT",
+    # "RADAR_FRONT_LEFT",
+    # "RADAR_FRONT_RIGHT",
+    # "RADAR_BACK_LEFT",
+    # "RADAR_BACK_RIGHT",
 ]

 TABLE_NAMES = [
supervisely/nn/benchmark/base_benchmark.py
@@ -1,5 +1,6 @@
 import os
-from typing import Callable, List, Optional, Tuple, Union
+from pathlib import Path
+from typing import Callable, List, Optional, Tuple, Union, Type

 import numpy as np

@@ -80,7 +81,7 @@ class BaseBenchmark:
         self.report_id = None
         self._validate_evaluation_params()

-    def _get_evaluator_class(self) -> type:
+    def _get_evaluator_class(self) -> Type[BaseEvaluator]:
         raise NotImplementedError()

     @property
@@ -95,6 +96,10 @@ class BaseBenchmark:
     def key_metrics(self):
         eval_results = self.get_eval_result()
         return eval_results.key_metrics
+
+    @property
+    def primary_metric_name(self) -> str:
+        return self._get_evaluator_class().eval_result_cls.PRIMARY_METRIC

     def run_evaluation(
         self,
@@ -492,6 +497,8 @@ class BaseBenchmark:
            "It should be defined in the subclass of BaseBenchmark (e.g. ObjectDetectionBenchmark)."
        )
        eval_result = self.get_eval_result()
+       self._dump_key_metrics(eval_result)
+
        layout_dir = self.get_layout_results_dir()
        self.visualizer = self.visualizer_cls(  # pylint: disable=not-callable
            self.api, [eval_result], layout_dir, self.pbar
@@ -621,3 +628,7 @@ class BaseBenchmark:
            self.diff_project_info = eval_result.diff_project_info
            return self.diff_project_info
        return None
+
+    def _dump_key_metrics(self, eval_result: BaseEvaluator):
+        path = str(Path(self.get_eval_results_dir(), "key_metrics.json"))
+        json.dump_json_file(eval_result.key_metrics, path)
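`_dump_key_metrics` writes a `key_metrics.json` file into the evaluation results directory, so downstream code can read the headline metrics without re-running an evaluator. Reading it back is a one-liner; a sketch assuming the SDK's `load_json_file` helper and a hypothetical path:

    from supervisely.io.json import load_json_file

    key_metrics = load_json_file("/path/to/eval_results/key_metrics.json")
    print(key_metrics)  # e.g. a dict of metric name -> value for the evaluated task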
supervisely/nn/benchmark/base_evaluator.py
@@ -12,6 +12,8 @@ from supervisely.task.progress import tqdm_sly


 class BaseEvalResult:
+    PRIMARY_METRIC = None
+
     def __init__(self, directory: Optional[str] = None):
         self.directory = directory
         self.inference_info: Dict = None
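`PRIMARY_METRIC`, together with the new `BaseBenchmark.primary_metric_name` property from the earlier hunk, lets callers ask a benchmark which single metric summarizes it: the evaluator hunks below set "mAP" for detection and instance segmentation and "mIoU" for semantic segmentation. A rough usage sketch (the constructor arguments are illustrative, not the real signature):

    # Illustrative only; see ObjectDetectionBenchmark for the actual constructor.
    benchmark = ObjectDetectionBenchmark(api, gt_project_id)
    print(benchmark.primary_metric_name)  # -> "mAP"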
supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py
@@ -87,6 +87,11 @@ In this section you can visually assess the model performance through examples.
> Filtering options allow you to adjust the confidence threshold (only for predictions) and the model's false outcomes (only for differences). Differences are calculated only for the optimal confidence threshold, allowing you to focus on the most accurate predictions made by the model.
"""

+markdown_different_iou_thresholds_warning = """### IoU Thresholds Mismatch
+
+<i class="zmdi zmdi-alert-polygon" style="color: #f5a623; margin-right: 5px"></i> The models were evaluated using different IoU thresholds. Since these thresholds varied between models and classes, it may have led to unfair comparison. For fair model comparison, we suggest using the same IoU threshold across models.
+"""
+
markdown_explore_difference = """## Explore Predictions

In this section, you can explore predictions made by different models side-by-side. This helps you to understand the differences in predictions made by each model, and to identify which model performs better in different scenarios.
supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py
@@ -1,3 +1,4 @@
+from collections import defaultdict
 from typing import List

 from supervisely._utils import abs_url
@@ -15,6 +16,7 @@ class Overview(BaseVisMetrics):
     MARKDOWN_OVERVIEW = "markdown_overview"
     MARKDOWN_OVERVIEW_INFO = "markdown_overview_info"
     MARKDOWN_COMMON_OVERVIEW = "markdown_common_overview"
+    MARKDOWN_DIFF_IOU = "markdown_different_iou_thresholds_warning"
     CHART = "chart_key_metrics"

     def __init__(self, vis_texts, eval_results: List[EvalResult]) -> None:
@@ -237,3 +239,26 @@ class Overview(BaseVisMetrics):
            ),
        )
        return fig
+
+    @property
+    def not_matched_iou_per_class_thresholds_md(self) -> MarkdownWidget:
+        if all([not r.different_iou_thresholds_per_class for r in self.eval_results]):
+            return None
+
+        iou_thrs_map = defaultdict(set)
+        matched = True
+        for eval_result in self.eval_results:
+            for cat_id, iou_thr in eval_result.mp.iou_threshold_per_class.items():
+                iou_thrs_map[cat_id].add(iou_thr)
+                if len(iou_thrs_map[cat_id]) > 1:
+                    matched = False
+                    break
+
+        if matched:
+            return None
+
+        return MarkdownWidget(
+            name="markdown_different_iou_thresholds_warning",
+            title="IoU per class thresholds mismatch",
+            text=self.vis_texts.markdown_different_iou_thresholds_warning,
+        )
supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py
@@ -50,10 +50,9 @@ class DetectionComparisonVisualizer(BaseComparisonVisualizer):
        self.overviews = self._create_overviews(overview)
        self.overview_md = overview.overview_md
        self.key_metrics_md = self._create_key_metrics()
-       self.key_metrics_table = overview.get_table_widget(
-           latency=speedtest.latency, fps=speedtest.fps
-       )
+       self.key_metrics_table = overview.get_table_widget(speedtest.latency, speedtest.fps)
        self.overview_chart = overview.chart_widget
+       self.iou_per_class_thresholds_md = overview.not_matched_iou_per_class_thresholds_md

        columns_number = len(self.comparison.eval_results) + 1  # +1 for GT
        self.explore_predictions_modal_gallery = self._create_explore_modal_table(columns_number)
@@ -154,6 +153,13 @@ class DetectionComparisonVisualizer(BaseComparisonVisualizer):
            (0, self.header),
            (1, self.overview_md),
            (0, self.overviews),
+       ]
+
+       if self.iou_per_class_thresholds_md is not None:
+           is_anchors_widgets.append((0, self.iou_per_class_thresholds_md))
+
+       is_anchors_widgets += [
+           # Key Metrics
            (1, self.key_metrics_md),
            (0, self.key_metrics_table),
            (0, self.overview_chart),
supervisely/nn/benchmark/instance_segmentation/evaluator.py
@@ -14,6 +14,7 @@ from supervisely.nn.benchmark.utils import calculate_metrics, read_coco_datasets

 class InstanceSegmentationEvalResult(ObjectDetectionEvalResult):
     mp_cls = MetricProvider
+    PRIMARY_METRIC = "mAP"

     @classmethod
     def from_evaluator(
supervisely/nn/benchmark/instance_segmentation/text_templates.py
@@ -60,6 +60,13 @@ Here, we comprehensively assess the model's performance by presenting a broad se
- **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
"""

+markdown_AP_custom_description = """> * AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""
+
+markdown_iou_per_class = """### IoU Threshold per Class
+
+The model is evaluated using different IoU thresholds for each class.
+"""
+
markdown_explorer = """## Explore Predictions
In this section you can visually assess the model performance through examples. This helps users better understand model capabilities and limitations, giving an intuitive grasp of prediction quality in different scenarios.

supervisely/nn/benchmark/object_detection/evaluator.py
@@ -19,6 +19,7 @@ from supervisely.nn.benchmark.visualization.vis_click_data import ClickData, IdM

 class ObjectDetectionEvalResult(BaseEvalResult):
     mp_cls = MetricProvider
+    PRIMARY_METRIC = "mAP"

     def _read_files(self, path: str) -> None:
         """Read all necessary files from the directory"""
@@ -92,6 +93,10 @@ class ObjectDetectionEvalResult(BaseEvalResult):
     def key_metrics(self):
         return self.mp.key_metrics()

+    @property
+    def different_iou_thresholds_per_class(self) -> bool:
+        return self.mp.iou_threshold_per_class is not None
+

 class ObjectDetectionEvaluator(BaseEvaluator):
     EVALUATION_PARAMS_YAML_PATH = f"{Path(__file__).parent}/evaluation_params.yaml"
@@ -120,12 +125,19 @@ class ObjectDetectionEvaluator(BaseEvaluator):

     @classmethod
     def validate_evaluation_params(cls, evaluation_params: dict) -> None:
+        available_thres = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
         iou_threshold = evaluation_params.get("iou_threshold")
         if iou_threshold is not None:
-            assert iou_threshold in [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], (
-                f"iou_threshold must be one of [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], "
-                f"but got {iou_threshold}"
+            assert iou_threshold in available_thres, (
+                f"iou_threshold must be one of {available_thres}, " f"but got {iou_threshold}"
             )
+        iou_threshold_per_class = evaluation_params.get("iou_threshold_per_class")
+        if iou_threshold_per_class is not None:
+            for class_name, iou_thres in iou_threshold_per_class.items():
+                assert iou_thres in available_thres, (
+                    f"class {class_name}: iou_threshold_per_class must be one of {available_thres}, "
+                    f"but got {iou_thres}"
+                )

     def _convert_to_coco(self):
         cocoGt_json = sly2coco(
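With this change, evaluation params may carry both a global `iou_threshold` and a per-class override map; every value must sit on the COCO 0.5:0.05:0.95 grid. A minimal sketch of params that pass the new validation (the class names are hypothetical):

    params = {
        "iou_threshold": 0.5,
        "iou_threshold_per_class": {"car": 0.7, "pedestrian": 0.5},  # hypothetical classes
    }
    ObjectDetectionEvaluator.validate_evaluation_params(params)  # ok; e.g. 0.72 would raise AssertionError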
supervisely/nn/benchmark/object_detection/metric_provider.py
@@ -92,7 +92,11 @@ class MetricProvider:

         eval_params = params.get("evaluation_params", {})
         self.iou_threshold = eval_params.get("iou_threshold", 0.5)
-        self.iou_threshold_idx = np.searchsorted(self.iouThrs, self.iou_threshold)
+        self.iou_threshold_idx = np.where(np.isclose(self.iouThrs, self.iou_threshold))[0][0]
+
+        # IoU per class (optional)
+        self.iou_threshold_per_class = eval_params.get("iou_threshold_per_class")
+        self.iou_idx_per_class = params.get("iou_idx_per_class")  # {cat id: iou_idx}

     def calculate(self):
         self.m_full = _MetricProvider(
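The switch from `np.searchsorted` to `np.where(np.isclose(...))` guards against float drift in the IoU grid: an exact 0.9 probe can land one slot past a stored 0.8999999999999999, which is the same value the `_MetricProvider` hunks below patch before their `searchsorted` calls. A small repro sketch:

    import numpy as np

    # The stored grid may carry float error in the 0.9 slot, as the hunks below check for.
    iouThrs = np.array([0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.8999999999999999, 0.95])
    np.searchsorted(iouThrs, 0.9)             # 9 -- lands on 0.95, the wrong index
    np.where(np.isclose(iouThrs, 0.9))[0][0]  # 8 -- the intended slot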
@@ -142,6 +146,8 @@ class MetricProvider:
     def json_metrics(self):
         base = self.base_metrics()
         iou_name = int(self.iou_threshold * 100)
+        if self.iou_threshold_per_class is not None:
+            iou_name = "_custom"
         ap_by_class = self.AP_per_class().tolist()
         ap_by_class = dict(zip(self.cat_names, ap_by_class))
         ap_custom_by_class = self.AP_custom_per_class().tolist()
@@ -166,6 +172,8 @@ class MetricProvider:

     def key_metrics(self):
         iou_name = int(self.iou_threshold * 100)
+        if self.iou_threshold_per_class is not None:
+            iou_name = "_custom"
         json_metrics = self.json_metrics()
         json_metrics.pop("AP_by_class")
         json_metrics.pop(f"AP{iou_name}_by_class")
@@ -174,6 +182,8 @@ class MetricProvider:
     def metric_table(self):
         table = self.json_metrics()
         iou_name = int(self.iou_threshold * 100)
+        if self.iou_threshold_per_class is not None:
+            iou_name = "_custom"
         return {
             "mAP": table["mAP"],
             "AP50": table["AP50"],
@@ -196,6 +206,10 @@ class MetricProvider:

     def AP_custom_per_class(self):
         s = self.coco_precision[self.iou_threshold_idx, :, :, 0, 2]
+        s = s.copy()
+        if self.iou_threshold_per_class is not None:
+            for cat_id, iou_idx in self.iou_idx_per_class.items():
+                s[:, cat_id - 1] = self.coco_precision[iou_idx, :, cat_id - 1, 0, 2]
         s[s == -1] = np.nan
         ap = np.nanmean(s, axis=0)
         return ap
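For orientation: `coco_precision` follows the pycocotools `eval["precision"]` layout, so `[..., 0, 2]` selects the all-areas, 100-detections slice, the loop swaps in each class's own IoU row, and the new `.copy()` keeps those per-class writes from mutating the cached tensor. The `cat_id - 1` indexing assumes contiguous 1-based category ids. A shape sketch under those assumptions:

    # pycocotools convention: precision has shape (T, R, K, A, M)
    #   T = IoU thresholds, R = recall points, K = classes, A = area ranges, M = maxDets settings
    # s = precision[iou_idx, :, :, 0, 2]  -> shape (R, K): area="all", maxDets=100
    # s[:, cat_id - 1] = precision[per_class_iou_idx, :, cat_id - 1, 0, 2]  # per-class override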
@@ -280,6 +294,9 @@ class _MetricProvider:
                ious.append(match["iou"])
                cats.append(cat_id_to_idx[match["category_id"]])
        ious = np.array(ious) + np.spacing(1)
+       if 0.8999999999999999 in iouThrs:
+           iouThrs = iouThrs.copy()
+           iouThrs[iouThrs == 0.8999999999999999] = 0.9
        iou_idxs = np.searchsorted(iouThrs, ious) - 1
        cats = np.array(cats)
        # TP
@@ -452,6 +469,9 @@ class _MetricProvider:
        )
        scores = np.array([m["score"] for m in matches_sorted])
        ious = np.array([m["iou"] if m["type"] == "TP" else 0.0 for m in matches_sorted])
+       if 0.8999999999999999 in iouThrs:
+           iouThrs = iouThrs.copy()
+           iouThrs[iouThrs == 0.8999999999999999] = 0.9
        iou_idxs = np.searchsorted(iouThrs, ious + np.spacing(1))

        # Check
supervisely/nn/benchmark/object_detection/text_templates.py
@@ -65,6 +65,13 @@ Here, we comprehensively assess the model's performance by presenting a broad se
- **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
"""

+markdown_AP_custom_description = """> * AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""
+
+markdown_iou_per_class = """### IoU Threshold per Class
+
+The model is evaluated using different IoU thresholds for each class.
+"""
+
markdown_explorer = """## Explore Predictions
In this section you can visually assess the model performance through examples. This helps users better understand model capabilities and limitations, giving an intuitive grasp of prediction quality in different scenarios.

supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py
@@ -29,6 +29,8 @@ class KeyMetrics(DetectionVisMetric):
        columns = ["metrics", "values"]
        content = []
        for metric, value in self.eval_result.mp.metric_table().items():
+           if metric == "AP_custom":
+               metric += "*"
            row = [metric, round(value, 2)]
            dct = {
                "row": row,
@@ -134,3 +136,13 @@ class KeyMetrics(DetectionVisMetric):
        ]

        return res
+
+    @property
+    def custom_ap_description_md(self) -> MarkdownWidget:
+        if not self.eval_result.different_iou_thresholds_per_class:
+            return None
+        return MarkdownWidget(
+            "custom_ap_description",
+            "Custom AP per Class",
+            self.vis_texts.markdown_AP_custom_description,
+        )
supervisely/nn/benchmark/object_detection/vis_metrics/overview.py
@@ -2,7 +2,7 @@ import datetime
 from typing import List

 from supervisely.nn.benchmark.object_detection.base_vis_metric import DetectionVisMetric
-from supervisely.nn.benchmark.visualization.widgets import MarkdownWidget
+from supervisely.nn.benchmark.visualization.widgets import MarkdownWidget, TableWidget


 class Overview(DetectionVisMetric):
@@ -32,6 +32,10 @@ class Overview(DetectionVisMetric):
        # link to scroll to the optimal confidence section
        opt_conf_url = self.vis_texts.docs_url + "#f1-optimal-confidence-threshold"

+       iou_threshold = self.eval_result.mp.iou_threshold
+       if self.eval_result.different_iou_thresholds_per_class:
+           iou_threshold = "Different IoU thresholds for each class (see the table below)"
+
        formats = [
            model_name.replace("_", "\_"),
            checkpoint_name.replace("_", "\_"),
@@ -45,7 +49,7 @@ class Overview(DetectionVisMetric):
            classes_str,
            note_about_images,
            starter_app_info,
-           self.eval_result.mp.iou_threshold,
+           iou_threshold,
            round(self.eval_result.mp.f1_optimal_conf, 4),
            opt_conf_url,
            self.vis_texts.docs_url,
@@ -112,3 +116,38 @@ class Overview(DetectionVisMetric):
        starter_app_info = train_session or evaluator_session or ""

        return classes_str, images_str, starter_app_info
+
+    @property
+    def iou_per_class_md(self) -> List[MarkdownWidget]:
+        if not self.eval_result.different_iou_thresholds_per_class:
+            return None
+
+        return MarkdownWidget(
+            "markdown_iou_per_class",
+            "Different IoU thresholds for each class",
+            text=self.vis_texts.markdown_iou_per_class,
+        )
+
+    @property
+    def iou_per_class_table(self) -> TableWidget:
+        if not self.eval_result.different_iou_thresholds_per_class:
+            return None
+
+        content = []
+        for name, thr in self.eval_result.mp.iou_threshold_per_class.items():
+            row = [name, round(thr, 2)]
+            dct = {"row": row, "id": name, "items": row}
+            content.append(dct)
+
+        data = {
+            "columns": ["Class name", "IoU threshold"],
+            "columnsOptions": [{"disableSort": True}, {}],
+            "content": content,
+        }
+        return TableWidget(
+            name="table_iou_per_class",
+            data=data,
+            fix_columns=1,
+            width="60%",
+            main_column="Class name",
+        )
supervisely/nn/benchmark/object_detection/visualizer.py
@@ -90,11 +90,16 @@ class ObjectDetectionVisualizer(BaseVisualizer):
        self.header = overview.get_header(me.login)
        self.overview_md = overview.md

+       # IOU Per Class (optional)
+       self.iou_per_class_md = overview.iou_per_class_md
+       self.iou_per_class_table = overview.iou_per_class_table
+
        # Key Metrics
        key_metrics = KeyMetrics(self.vis_texts, self.eval_result)
        self.key_metrics_md = key_metrics.md
        self.key_metrics_table = key_metrics.table
        self.overview_chart = key_metrics.chart
+       self.custom_ap_description = key_metrics.custom_ap_description_md

        # Explore Predictions
        explore_predictions = ExplorePredictions(
@@ -238,9 +243,24 @@ class ObjectDetectionVisualizer(BaseVisualizer):
            # Overview
            (0, self.header),
            (1, self.overview_md),
+       ]
+
+       if self.iou_per_class_table is not None:
+           is_anchors_widgets += [
+               (0, self.iou_per_class_md),
+               (0, self.iou_per_class_table),
+           ]
+
+       is_anchors_widgets += [
            # KeyMetrics
            (1, self.key_metrics_md),
            (0, self.key_metrics_table),
+       ]
+
+       if self.custom_ap_description is not None:
+           is_anchors_widgets.append((0, self.custom_ap_description))
+
+       is_anchors_widgets += [
            (0, self.overview_chart),
            # ExplorePredictions
            (1, self.explore_predictions_md),
supervisely/nn/benchmark/semantic_segmentation/evaluator.py
@@ -25,6 +25,7 @@ from supervisely.sly_logger import logger

 class SemanticSegmentationEvalResult(BaseEvalResult):
     mp_cls = MetricProvider
+    PRIMARY_METRIC = "mIoU"

     def _read_files(self, path: str) -> None:
         """Read all necessary files from the directory"""
supervisely/nn/benchmark/utils/detection/calculate_metrics.py
@@ -48,8 +48,11 @@ def calculate_metrics(
    :return: Results of the evaluation
    :rtype: dict
    """
+   from pycocotools.coco import COCO  # pylint: disable=import-error
    from pycocotools.cocoeval import COCOeval  # pylint: disable=import-error

+   cocoGt: COCO = cocoGt
+
    cocoEval = COCOeval(cocoGt, cocoDt, iouType=iouType)
    cocoEval.evaluate()
    progress_cb(1) if progress_cb is not None else None
@@ -66,23 +69,33 @@ def calculate_metrics(
    progress_cb(1) if progress_cb is not None else None
    cocoEval_cls.summarize()

-   iou_t = 0
-   is_custom_iou_threshold = (
-       evaluation_params is not None
-       and evaluation_params.get("iou_threshold")
-       and evaluation_params["iou_threshold"] != 0.5
-   )
-   if is_custom_iou_threshold:
-       iou_t = np.where(cocoEval.params.iouThrs == evaluation_params["iou_threshold"])[0][0]
+   iouThrs = cocoEval.params.iouThrs
+   evaluation_params = evaluation_params or {}
+   iou_threshold = evaluation_params.get("iou_threshold", 0.5)
+   iou_threshold_per_class = evaluation_params.get("iou_threshold_per_class")
+   if iou_threshold_per_class is not None:
+       iou_idx_per_class = {
+           cocoGt.getCatIds(catNms=[class_name])[0]: np.where(np.isclose(iouThrs, iou_thres))[0][0]
+           for class_name, iou_thres in iou_threshold_per_class.items()
+       }
+   else:
+       iou_idx = np.where(np.isclose(iouThrs, iou_threshold))[0][0]
+       iou_idx_per_class = {cat_id: iou_idx for cat_id in cocoGt.getCatIds()}

    eval_img_dict = get_eval_img_dict(cocoEval)
    eval_img_dict_cls = get_eval_img_dict(cocoEval_cls)
-   matches = get_matches(eval_img_dict, eval_img_dict_cls, cocoEval_cls, iou_t=iou_t)
+   matches = get_matches(
+       eval_img_dict,
+       eval_img_dict_cls,
+       cocoEval_cls,
+       iou_idx_per_class=iou_idx_per_class,
+   )

    params = {
        "iouThrs": cocoEval.params.iouThrs,
        "recThrs": cocoEval.params.recThrs,
-       "evaluation_params": evaluation_params or {},
+       "evaluation_params": evaluation_params,
+       "iou_idx_per_class": iou_idx_per_class,
    }
    coco_metrics = {"mAP": cocoEval.stats[0], "precision": cocoEval.eval["precision"]}
    coco_metrics["AP50"] = cocoEval.stats[1]
@@ -204,27 +217,6 @@ def get_eval_img_dict(cocoEval):
    return eval_img_dict


-def get_eval_img_dict_cls(cocoEval_cls):
-    """
-    type cocoEval_cls: COCOeval
-    """
-    # For miss-classification
-    aRng = cocoEval_cls.params.areaRng[0]
-    eval_img_dict_cls = defaultdict(list)  # img_id : dt/gt
-    for i, eval_img in enumerate(cocoEval_cls.evalImgs):
-        if eval_img is None or eval_img["aRng"] != aRng:
-            continue
-        img_id = eval_img["image_id"]
-        cat_id = eval_img["category_id"]
-        ious = cocoEval_cls.ious[(img_id, cat_id)]
-        # ! inplace operation
-        eval_img["ious"] = ious
-        eval_img_dict_cls[img_id].append(eval_img)
-    eval_img_dict_cls = dict(eval_img_dict_cls)
-    assert np.all([len(x) == 1 for x in eval_img_dict_cls.values()])
-    return eval_img_dict_cls
-
-
def _get_missclassified_match(eval_img_cls, dt_id, gtIds_orig, dtIds_orig, iou_t):
    # Correction on miss-classification
    gt_idx = np.nonzero(eval_img_cls["gtMatches"][iou_t] == dt_id)[0]
@@ -242,7 +234,12 @@ def _get_missclassified_match(eval_img_cls, dt_id, gtIds_orig, dtIds_orig, iou_t
    return None, None


-def get_matches(eval_img_dict: dict, eval_img_dict_cls: dict, cocoEval_cls, iou_t: int = 0):
+def get_matches(
+    eval_img_dict: dict,
+    eval_img_dict_cls: dict,
+    cocoEval_cls,
+    iou_idx_per_class: dict = None,
+):
    """
    type cocoEval_cls: COCOeval
    """
@@ -255,7 +252,8 @@ def get_matches(eval_img_dict: dict, eval_img_dict_cls: dict, cocoEval_cls, iou_
        gt_ids_orig_cls = [_["id"] for i in cat_ids for _ in cocoEval_cls._gts[img_id, i]]

        for eval_img in eval_imgs:
-
+           cat_id = eval_img["category_id"]
+           iou_t = iou_idx_per_class[cat_id]
            dtIds = np.array(eval_img["dtIds"])
            gtIds = np.array(eval_img["gtIds"])
            dtm = eval_img["dtMatches"][iou_t]
supervisely/nn/benchmark/visualization/renderer.py
@@ -8,6 +8,7 @@ from supervisely.api.api import Api
 from supervisely.io.fs import dir_empty, get_directory_size
 from supervisely.nn.benchmark.visualization.widgets import BaseWidget
 from supervisely.task.progress import tqdm_sly
+from supervisely import logger


 class Renderer:
@@ -95,6 +96,7 @@ class Renderer:
        pth = Path(self.base_dir).joinpath(self.report_name)
        with open(pth, "w") as f:
            f.write(report_link)
+       logger.debug(f"Report link: {self._get_report_link(api, team_id, remote_dir)}")
        return str(pth)

    def _get_report_link(self, api: Api, team_id: int, remote_dir: str):
{supervisely-6.73.282.dist-info → supervisely-6.73.284.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.282
+Version: 6.73.284
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely
{supervisely-6.73.282.dist-info → supervisely-6.73.284.dist-info}/RECORD
@@ -561,18 +561,18 @@ supervisely/collection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 supervisely/collection/key_indexed_collection.py,sha256=x2UVlkprspWhhae9oLUzjTWBoIouiWY9UQSS_MozfH0,37643
 supervisely/collection/str_enum.py,sha256=Zp29yFGvnxC6oJRYNNlXhO2lTSdsriU1wiGHj6ahEJE,1250
 supervisely/convert/__init__.py,sha256=pF1bOrg8SzkdFn90AWGRmVa9OQrHABY0gTlgurJ86Tw,962
-supervisely/convert/base_converter.py,sha256=NHbOYxfZ8Yfs6qJz8zuR5kzKSlkfQs9-fpuafwdAGnA,18583
+supervisely/convert/base_converter.py,sha256=m4wh1BZIW_wbzZk4eS3PN50TMIsb2ZPud2RjVcPfQxY,18627
 supervisely/convert/converter.py,sha256=tWxTDfFv7hwzQhUQrBxzfr6WP8FUGFX_ewg5T2HbUYo,8959
 supervisely/convert/image/__init__.py,sha256=JEuyaBiiyiYmEUYqdn8Mog5FVXpz0H1zFubKkOOm73I,1395
-supervisely/convert/image/image_converter.py,sha256=r-qdhuwOsk727mXIM26ucQhkoIKigu1M0BF-tw9IfGg,10321
+supervisely/convert/image/image_converter.py,sha256=8vak8ZoKTN1ye2ZmCTvCZ605-Rw1AFLIEo7bJMfnR68,10426
 supervisely/convert/image/image_helper.py,sha256=fdV0edQD6hVGQ8TXn2JGDzsnrAXPDMacHBQsApzOME8,3677
 supervisely/convert/image/cityscapes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/convert/image/cityscapes/cityscapes_converter.py,sha256=msmsR2W-Xiod06dwn-MzmkbrEmQQqlKh7zyfTrW6YQw,7854
+supervisely/convert/image/cityscapes/cityscapes_converter.py,sha256=tnelQJHvGz_IGMXWe-EKWAkBhexRzmkv_0Kln5sN12E,8100
 supervisely/convert/image/cityscapes/cityscapes_helper.py,sha256=in5nR7__q_u5dCkVtZmynfZ_ZuvsIAHrTzyTG4EvNgU,2988
 supervisely/convert/image/coco/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/convert/image/coco/coco_anntotation_converter.py,sha256=79rhAy_nkudxEgJDLW0BziUz808-fSqTOnlUeN-kvn8,6603
-supervisely/convert/image/coco/coco_converter.py,sha256=7czTd4I1we_HxEc9diQiXPC2pXAtnoqSnFSVCtNOmP4,5431
-supervisely/convert/image/coco/coco_helper.py,sha256=dUk5vCsIxeZDbpjs8_oviPGQDW7CosjqbnjwaJQd0mU,32849
+supervisely/convert/image/coco/coco_anntotation_converter.py,sha256=O1PQbwrbnpQBks2pcz2nbAnhSqpKqNk13B2ARk_roFM,7078
+supervisely/convert/image/coco/coco_converter.py,sha256=7dW7vE6yTRz7O31vTVSnEA4MDCc_UXTqc2UFEqaKorI,5650
+supervisely/convert/image/coco/coco_helper.py,sha256=ykZe_M_yfDqJT9FoQXQ3zuLbQMO0l1WP75QMbvKEx5Y,32866
 supervisely/convert/image/csv/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/convert/image/csv/csv_converter.py,sha256=iLyc2PAVtlsAq7blnGH4iS1_D7Ai6-4UsdI_RlDVB9Q,11677
 supervisely/convert/image/csv/csv_helper.py,sha256=-nR192IfMU0vTlNRoKXu5FS6tTs9fENqySyeKKyemRs,8409
@@ -592,7 +592,7 @@ supervisely/convert/image/medical2d/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 supervisely/convert/image/medical2d/medical2d_converter.py,sha256=cYEaRfr8YFxEG_Pv-_SVMxrqZudi3kWbGQ3aArL2mds,8156
 supervisely/convert/image/medical2d/medical2d_helper.py,sha256=pfLRCSFbFa5EIhmbB7kdmdWRu01OwIEDPXeNHzAeagg,12329
 supervisely/convert/image/multi_view/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/convert/image/multi_view/multi_view.py,sha256=TF53gXEpfudMb2MPbzunNqrR44tMTCOBBMBqGGwqQAQ,3794
+supervisely/convert/image/multi_view/multi_view.py,sha256=V-6oFN6oDre7UhejfyDkGKAg4rbM3C9JCQ8pHhuUBb8,4436
 supervisely/convert/image/multispectral/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/convert/image/multispectral/multispectral_converter.py,sha256=T3etYVNI0AUUrQsQhxw_r85NthXrqhqmdZQfz8kUY0g,5194
 supervisely/convert/image/pascal_voc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -606,7 +606,7 @@ supervisely/convert/image/sly/fast_sly_image_converter.py,sha256=pZmQzhx9FrHwgVn
 supervisely/convert/image/sly/sly_image_converter.py,sha256=097ijLa_62ZBu0elRx0xX_wpi9tmwgNZonVvBccfclg,12842
 supervisely/convert/image/sly/sly_image_helper.py,sha256=5Ri8fKb5dzh5b3v8AJ5u8xVFOQfAtoWqZ7HktPsCjTI,7373
 supervisely/convert/image/yolo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/convert/image/yolo/yolo_converter.py,sha256=cg5___X5MzvR-rZbNLmaKtr0MdRnyqtEzbBq5UBnYZ0,11171
+supervisely/convert/image/yolo/yolo_converter.py,sha256=Wn5dR05y4SEPONcaxWr9ofnbvbf-SbRZN0fkksk5Dps,11391
 supervisely/convert/image/yolo/yolo_helper.py,sha256=IwyBMZE_3eblsHhw8egeZUR9h_NciwjrxvVLNuZbxY4,19194
 supervisely/convert/pointcloud/__init__.py,sha256=WPeIpPoTWDIKAa0lF6t2SMUhFNZ0l-vKujf6yD6w7SA,589
 supervisely/convert/pointcloud/pointcloud_converter.py,sha256=yCCpzm7GrvL6WT4lNesvtYWWwdO3DO32JIOBBSSQgSA,7130
@@ -637,8 +637,8 @@ supervisely/convert/pointcloud_episodes/bag/bag_converter.py,sha256=jzWKXoFUWu11
 supervisely/convert/pointcloud_episodes/lyft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/convert/pointcloud_episodes/lyft/lyft_converter.py,sha256=QXreWUJ-QhoWgLPqRxCayatYCCCuSV6Z2XCZKScrD3o,10419
 supervisely/convert/pointcloud_episodes/nuscenes_conv/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py,sha256=hveKmKVe-jOvME1mMbDwynTlL5kKqd53T9BDmFitbkM,12808
-supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py,sha256=RrTlskRrqxwzrjFfT4e5aI9YeAxnun7Io9fjoicZmnY,8959
+supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py,sha256=4HWouf-H4e5M_Hwd481DpLq17mIZMGkhRVOBgY4alXM,12692
+supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py,sha256=cJTwhFn1JgblbPjrTrZu30y6FxyjGF-12sMFfvN1xzM,8969
 supervisely/convert/pointcloud_episodes/sly/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/convert/pointcloud_episodes/sly/sly_pointcloud_episodes_converter.py,sha256=fSEGxuTtFTAOLNBAZncOxw9PVALBOtB7yZ8qTCaET7w,6102
 supervisely/convert/pointcloud_episodes/sly/sly_pointcloud_episodes_helper.py,sha256=h4WvNH6cEHtjxxhCnU7Hs2vkyJMye0qwabqXNYVTywE,3570
@@ -744,23 +744,23 @@ supervisely/nn/artifacts/utils.py,sha256=C4EaMi95MAwtK5TOnhK4sQ1BWvgwYBxXyRStkhY
 supervisely/nn/artifacts/yolov5.py,sha256=slh05EpQsxqgKwB9KMClshdBxPBN3ZWZ6S4B80ECEt4,1724
 supervisely/nn/artifacts/yolov8.py,sha256=sFd9kU7Gdowq6WH1S3NdlQeoL9jjQKmRYb51fG_wbDk,1446
 supervisely/nn/benchmark/__init__.py,sha256=7jDezvavJFtO9mDeB2TqW8N4sD8TsHQBPpA9RESleIQ,610
-supervisely/nn/benchmark/base_benchmark.py,sha256=2bNKZgcU3l1CTkdP9Glfze5-G2JGtU-V3BFEOKnrdXw,25281
-supervisely/nn/benchmark/base_evaluator.py,sha256=6JbAkphcZTghfIYFyrWDWb59_exbPVx23yot_LPOfIA,5221
+supervisely/nn/benchmark/base_benchmark.py,sha256=Xnb0jL0voBPC-s_eVYSYbYv-xVfLYtQf1tHLnJ9ktq8,25713
+supervisely/nn/benchmark/base_evaluator.py,sha256=sc8gNn3myGA8sGnP6EIiTp24JPXUQ9Ou-8BmTf-Dt7w,5248
 supervisely/nn/benchmark/base_visualizer.py,sha256=7woiYmztDzYZlbhL1hTfJnIi26RFi4obF2VLA519uxQ,10092
 supervisely/nn/benchmark/cv_tasks.py,sha256=ShoAbuNzfMYj0Se-KOnl_-dJnrmvN6Aukxa0eq28bFw,239
 supervisely/nn/benchmark/comparison/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/comparison/base_visualizer.py,sha256=ldJHfGnrbudbxP6ErzBIorPv9r2L9vV8o6UTNqLBfGI,5341
 supervisely/nn/benchmark/comparison/model_comparison.py,sha256=qgd8TwXJ2aiIqB29__EnDV391fBcRfdaIzUYpTyFp9w,7055
 supervisely/nn/benchmark/comparison/detection_visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py,sha256=lBqcNsyOX04R2jL6Efu43xB2aHC1qx28Kw27DLuk66o,28336
-supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py,sha256=JSaFJnKptkITPjzBK_1T-HpYLnVuEzIvRVeismmk5O0,11159
+supervisely/nn/benchmark/comparison/detection_visualization/text_templates.py,sha256=JQ2DqGsvUBbjVmEsq9pGc41U8WQrtaX_Gin4IBguIow,28735
+supervisely/nn/benchmark/comparison/detection_visualization/visualizer.py,sha256=1PZU9P7VP4lPZDuv9dg_f3il03SQ_zRr5G_L6Q72gbQ,11421
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/__init__.py,sha256=2cqjAOwahJoptYhbFKEWws8gRW3K3bxlA9KnQleCrsA,1125
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/avg_precision_by_class.py,sha256=f4FMY-XCiNs0VA-e0aOFVNImjTQSnF4wTLNRSlj2oYA,4636
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/calibration_score.py,sha256=IKg2rofo21xGxQIoQyNlZ-UqOpAVviSn_A9SDkokC0w,7592
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/explore_predictions.py,sha256=axaviTZ4dLVWIc2R-o0Kv8g8Zr1SQVfyeokoLEA9Eqw,6484
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/localization_accuracy.py,sha256=OYmnloods1UYQ8SIPAcyOK33w0iYSn637OeMKNTrgbA,5342
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/outcome_counts.py,sha256=vev6EOc7-01i-5VAyQQwm9FGOMFYcWfJ2Y6ufUWg-DQ,13143
-supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py,sha256=YZIadwwQQvr5XfzFGj6_NGylad00ERLV1fYnke_g_VA,9256
+supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/overview.py,sha256=drRm_Hn8Da4Oc_dsa4ol_mZSeMRNYT1-Zkmb0LbiEAA,10193
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/pr_curve.py,sha256=66hs426dR1_TUps9K-UhBp5_xBiFjIouKcF_5gP-Hn8,4797
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/precision_recal_f1.py,sha256=602GFNpKZjeRhxUTdlcE6ZczcFEGEjp0qLoTbkM54M4,11558
 supervisely/nn/benchmark/comparison/detection_visualization/vis_metrics/speedtest.py,sha256=sQDkzfpVNaSYBHVcHYqydRSWN0i-yV9uhtEAggg295A,10879
@@ -778,17 +778,17 @@ supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/speedtest.
 supervisely/nn/benchmark/instance_segmentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/instance_segmentation/benchmark.py,sha256=lTDzgKGpfeF5o_a2nS56wiAsUQPH1eubk37b9CaB2KI,1171
 supervisely/nn/benchmark/instance_segmentation/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
-supervisely/nn/benchmark/instance_segmentation/evaluator.py,sha256=KZhCZ0UWfw_eCrNDtv4WJj3ZQI2w66qGl4Lu9Bl85_Q,2784
-supervisely/nn/benchmark/instance_segmentation/text_templates.py,sha256=sGiGnpIyuOP35f4NoDT0BWNmscsx_T1XZ_igGvCkkBg,25481
+supervisely/nn/benchmark/instance_segmentation/evaluator.py,sha256=mpCi8S6YNwlVvgcERQSHBOhC9PrSfQkQ55pPTcK6V9c,2811
+supervisely/nn/benchmark/instance_segmentation/text_templates.py,sha256=_ZIU_3-xlUGKTcbEthxB4Ngt12azdC7pxpgqHHw7M3I,25780
 supervisely/nn/benchmark/instance_segmentation/visualizer.py,sha256=8NscOKy7JK4AG-Czu3SM0qJQXLDfKD9URdG1d4nz89E,564
 supervisely/nn/benchmark/object_detection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/object_detection/base_vis_metric.py,sha256=XjUnFCnCMYLrpjojIOwiRNaSsSLYpozTHWfwLkaCd5U,1612
 supervisely/nn/benchmark/object_detection/benchmark.py,sha256=Wb4xlFXilIMVfsifNNQY25uE52NeEDLzQpnq8QPYq9U,1086
 supervisely/nn/benchmark/object_detection/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
-supervisely/nn/benchmark/object_detection/evaluator.py,sha256=Oajs2a-lpudSF076_99-he7uDtqnmQGp4aayoQjNRH0,6801
-supervisely/nn/benchmark/object_detection/metric_provider.py,sha256=j7-_ZGjW8qRpv_v9EMyqO1rK9pkcgjRcInTA8CL3we4,20169
-supervisely/nn/benchmark/object_detection/text_templates.py,sha256=wv6BkcrfdEKldbXtz4ljkbENIIOdeiUr79U4iRgUx6A,25727
-supervisely/nn/benchmark/object_detection/visualizer.py,sha256=Nemp2ZjipbK3S119Yx_izoZuS86jx2U-wtah8TfthS0,31734
+supervisely/nn/benchmark/object_detection/evaluator.py,sha256=EOQQbmwQqjjvbRu3tY24SRA7K8nyqshR92gUcP1lcrY,7371
+supervisely/nn/benchmark/object_detection/metric_provider.py,sha256=MLVRnSwMQ9lfrlgBt4ThIHTVKY-6zuuEWK5-yVsmaj0,21140
+supervisely/nn/benchmark/object_detection/text_templates.py,sha256=J5xUPCGY-QWxc5AEt_u9_2r5q0LBlIzsa007H0GgoeU,26026
+supervisely/nn/benchmark/object_detection/visualizer.py,sha256=NpLKVW5fo6N0kYzgLsfY66wvCv38G3k-SNm4HImXt6g,32366
 supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py,sha256=AXCLHEySEdR-B-5sfDoWBmmOLBVlyW2U_xr8Ta42sQI,2096
 supervisely/nn/benchmark/object_detection/vis_metrics/confidence_distribution.py,sha256=OlwkPgzEQ-RegcLZHVUVOL0n6I_2iayPVpAIie4y2O8,3615
 supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py,sha256=r_saaZI4WB7C7ykNb1obmf8kEOkphLA4pInDoS6dXXU,4005
@@ -797,11 +797,11 @@ supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py,sha
 supervisely/nn/benchmark/object_detection/vis_metrics/f1_score_at_different_iou.py,sha256=6y2Kx-R_t4SdJkdWNyZQ6TGjCC-u6KhXb4cCno4GuTk,2882
 supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py,sha256=7rObk7WNsfwK7xBWl3aOxcn0uD48njEc04fQIPHc3_4,4678
 supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py,sha256=lv4Bk8W4X8ZhvQKyMXI46d240PNlMFx1hdji_aoTS50,3601
-supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py,sha256=Z8qArtjqjjRW5Z4mBuanzK3b4LLfH6NgNbO2Lt0kXyo,4316
+supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py,sha256=byucJuHYWSXIZU8U1Dc44QDpG3lTlhoNdUfD1b-uriw,4721
 supervisely/nn/benchmark/object_detection/vis_metrics/model_predictions.py,sha256=gsGDsesiwOcqeFvHr33b4PSJNw6MoA5brO-qRydRtsA,5944
 supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py,sha256=HuTgisYmXCSUeF5WOahy-uaCdvRLsNzg28BDrZ-5hww,7161
 supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py,sha256=GBq0KlPka5z4cxHcKCe2eVOI_h3qlWUqGCyhYs6mjrk,6825
-supervisely/nn/benchmark/object_detection/vis_metrics/overview.py,sha256=tEHxK8NuquAcMcDxdn2G6GEIHp7eauOiTIQP1yFvgSs,5058
+supervisely/nn/benchmark/object_detection/vis_metrics/overview.py,sha256=M6E--Yd1ztP4VBjR6VDUVrj2hgs5mwJF-vhWIjgVGkw,6376
 supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py,sha256=EeZmyNlTVQLQ-0wIDGdvFmRkahJBBiOKSmWiAJ8Bfks,3478
 supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py,sha256=Bl_buVvH8SVqwsc4DcHnojMOqpwTnRgXFt9yw_Y1BR0,1607
 supervisely/nn/benchmark/object_detection/vis_metrics/precision.py,sha256=cAgMrp13uulHfM8xnPDZyR6PqS8nck1Fo7YPpvHPCbw,2708
@@ -814,7 +814,7 @@ supervisely/nn/benchmark/semantic_segmentation/__init__.py,sha256=Hx5bFhRZq8BXvN
 supervisely/nn/benchmark/semantic_segmentation/base_vis_metric.py,sha256=mwGjRUTPrIj56WHsxNW_4fcZM0uw1xm6B5wh25FijyQ,1788
 supervisely/nn/benchmark/semantic_segmentation/benchmark.py,sha256=8rnU6I94q0GUdXWwluZu0_Sac_eU2-Az133tHF1dA3U,1202
 supervisely/nn/benchmark/semantic_segmentation/evaluation_params.yaml,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/nn/benchmark/semantic_segmentation/evaluator.py,sha256=0zNR60As5-8ww3BVj_qNq70VJUDyuUAxcUiSj7FQS_Q,7233
+supervisely/nn/benchmark/semantic_segmentation/evaluator.py,sha256=XafPMpGL6v0ZQ-m7DkEjoY7W6fGCJNKolql5BA3M8V0,7261
 supervisely/nn/benchmark/semantic_segmentation/metric_provider.py,sha256=GwdRvyG0_nFpng6jN8ISFcMLfDbBd-fwdtoWR2XPKw4,6552
 supervisely/nn/benchmark/semantic_segmentation/text_templates.py,sha256=7yRRD2FAdJHGSRqBVIjNjzCduKzaepA1OWtggi7B0Dg,8580
 supervisely/nn/benchmark/semantic_segmentation/visualizer.py,sha256=Nt2-OOWKQ8fbaXFk5QeEaMtMURKPQebgjzDytVgQk0g,13196
@@ -833,7 +833,7 @@ supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py,sha256=0
 supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py,sha256=rRdYZxmhQX4T3RsXJVGp34NMZPz8jUHtVvBN5BpPJ5I,603
 supervisely/nn/benchmark/utils/__init__.py,sha256=JHT73gWdwgLJKTiCpDdwggm1t_EWB0JCC90-zD7reXM,741
 supervisely/nn/benchmark/utils/detection/__init__.py,sha256=L3QKGuKUlR2N_QFRTRsa6gfLDbksIaFMYO0Hukxxy1U,172
-supervisely/nn/benchmark/utils/detection/calculate_metrics.py,sha256=1Vrf4Npf8lHAE_WZRS90fa5jFh4BofDn6RWCFcd48r8,11484
+supervisely/nn/benchmark/utils/detection/calculate_metrics.py,sha256=LXZET5yLp9S7Uq2eX4HpAMnBMxTI5Q2CgKSc1mCfaRM,11388
 supervisely/nn/benchmark/utils/detection/metric_provider.py,sha256=cgF6uzF7XOvU2CpxyU7zuK1HH6hhNiIV3vQc8MAzwMU,19934
 supervisely/nn/benchmark/utils/detection/metrics.py,sha256=oyictdJ7rRDUkaVvHoxntywW5zZweS8pIJ1bN6JgXtE,2420
 supervisely/nn/benchmark/utils/detection/sly2coco.py,sha256=0O2LSCU5zIX34mD4hZIv8O3-j6LwnB0DqhiVPAiosO8,6883
@@ -845,7 +845,7 @@ supervisely/nn/benchmark/utils/semantic_segmentation/loader.py,sha256=_5ZZ7Nkd8W
 supervisely/nn/benchmark/utils/semantic_segmentation/utils.py,sha256=X5NiR02R-0To2_SuSGHZZccl_-Bupg5F9d7nziIMRMc,3874
 supervisely/nn/benchmark/visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/evaluation_result.py,sha256=733HJL4rJa5XqCJydW9vSyaepvpHzym9wQsw1wFEgeI,10251
-supervisely/nn/benchmark/visualization/renderer.py,sha256=s1YexuKDHrI2gP-Qpz1fAXCh30dBQM7whnMwjbOf61M,3804
+supervisely/nn/benchmark/visualization/renderer.py,sha256=j6dFqIGa9M0DbVVR-jgJab-MzUUiw47coHiiBF_H_jQ,3923
 supervisely/nn/benchmark/visualization/report_template.html,sha256=tylBK5Bb2cqKACK1GZUKyIjPS9yHQFHAS-QeEEwhqTE,2172
 supervisely/nn/benchmark/visualization/vis_click_data.py,sha256=hBeVepHngTGVHK3MiWe8qZY87taifxnoUXq22W2xaqo,3724
 supervisely/nn/benchmark/visualization/widgets/__init__.py,sha256=UovmhwLH4Au81JFrFz0NwPasaIqPEI-zXN-JntTc2FU,949
@@ -1070,9 +1070,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.282.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.282.dist-info/METADATA,sha256=qGp4_zjqrTMz3V40SpkZe9J1yy-1HCAPicbSc5vpeGo,33573
-supervisely-6.73.282.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-supervisely-6.73.282.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.282.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.282.dist-info/RECORD,,
+supervisely-6.73.284.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.284.dist-info/METADATA,sha256=GriDRRnOtHK84_HcMIMCD6z4qfCCZSHH5NyJsTmW3e4,33573
+supervisely-6.73.284.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+supervisely-6.73.284.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.284.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.284.dist-info/RECORD,,