supervisely 6.73.200__py3-none-any.whl → 6.73.201__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/supervisely/nn/benchmark/base_benchmark.py
+++ b/supervisely/nn/benchmark/base_benchmark.py
@@ -449,6 +449,28 @@ class BaseBenchmark:
         vis.visualize()
 
     def _get_or_create_diff_project(self) -> Tuple[ProjectInfo, bool]:
+
+        dt_ds_id_to_diff_ds_info = {}
+
+        def _get_or_create_diff_dataset(dt_dataset_id, dt_datasets):
+            if dt_dataset_id in dt_ds_id_to_diff_ds_info:
+                return dt_ds_id_to_diff_ds_info[dt_dataset_id]
+            dt_dataset = dt_datasets[dt_dataset_id]
+            if dt_dataset.parent_id is None:
+                diff_dataset = self.api.dataset.create(
+                    diff_project_info.id,
+                    dt_dataset.name,
+                )
+            else:
+                parent_dataset = _get_or_create_diff_dataset(dt_dataset.parent_id, dt_datasets)
+                diff_dataset = self.api.dataset.create(
+                    diff_project_info.id,
+                    dt_dataset.name,
+                    parent_id=parent_dataset.id,
+                )
+            dt_ds_id_to_diff_ds_info[dt_dataset_id] = diff_dataset
+            return diff_dataset
+
         diff_project_name = self._generate_diff_project_name(self.dt_project_info.name)
         diff_workspace_id = self.dt_project_info.workspace_id
         diff_project_info = self.api.project.get_info_by_name(
@@ -460,8 +482,12 @@ class BaseBenchmark:
             diff_project_info = self.api.project.create(
                 diff_workspace_id, diff_project_name, change_name_if_conflict=True
             )
-            for dataset in self.api.dataset.get_list(self.dt_project_info.id):
-                self.api.dataset.create(diff_project_info.id, dataset.name)
+            dt_datasets = {
+                ds.id: ds
+                for ds in self.api.dataset.get_list(self.dt_project_info.id, recursive=True)
+            }
+            for dataset in dt_datasets:
+                _get_or_create_diff_dataset(dataset, dt_datasets)
         return diff_project_info, is_existed
 
     def upload_visualizations(self, dest_dir: str):
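The new `_get_or_create_diff_dataset` helper walks the dataset hierarchy parent-first and memoizes the result, so each source dataset is created in the diff project exactly once and nested datasets land under the correct parent. Below is a minimal, self-contained sketch of the same pattern; the `FakeDataset` record and `create_tree` function are illustrative stand-ins, not part of the SDK:

```python
from dataclasses import dataclass
from typing import Dict, Optional


@dataclass
class FakeDataset:
    id: int
    name: str
    parent_id: Optional[int] = None


def create_tree(src: Dict[int, FakeDataset]) -> Dict[int, FakeDataset]:
    """Recreate a nested dataset tree, creating parents before children."""
    created: Dict[int, FakeDataset] = {}
    next_id = iter(range(1000, 10_000))

    def get_or_create(ds_id: int) -> FakeDataset:
        if ds_id in created:  # memoized: each source dataset is handled once
            return created[ds_id]
        ds = src[ds_id]
        parent = get_or_create(ds.parent_id) if ds.parent_id is not None else None
        new_ds = FakeDataset(next(next_id), ds.name, parent.id if parent else None)
        created[ds_id] = new_ds
        return new_ds

    for ds_id in src:  # iteration order does not matter thanks to the recursion
        get_or_create(ds_id)
    return created


if __name__ == "__main__":
    src = {1: FakeDataset(1, "root"), 2: FakeDataset(2, "child", parent_id=1)}
    print(create_tree(src))  # the child is re-created under its new parent's id
```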
--- a/supervisely/nn/benchmark/coco_utils/sly2coco.py
+++ b/supervisely/nn/benchmark/coco_utils/sly2coco.py
@@ -7,7 +7,7 @@ import numpy as np
 
 from supervisely import Bitmap
 from supervisely._utils import batched
-from supervisely.project.project import Project, OpenMode
+from supervisely.project.project import Dataset, OpenMode, Project
 
 
 def sly2coco(
@@ -20,11 +20,7 @@ def sly2coco(
 ):
     from pycocotools import mask as maskUtils  # pylint: disable=import-error
 
-    datasets = [
-        name
-        for name in os.listdir(sly_project_path)
-        if os.path.isdir(pjoin(sly_project_path, name))
-    ]
+    project = Project(sly_project_path, mode=OpenMode.READ)
 
     # Categories
     meta_path = pjoin(sly_project_path, "meta.json")
@@ -52,13 +48,13 @@ def sly2coco(
     images = []
     annotations = []
     annotation_id = 1
-    project = Project(sly_project_path, mode=OpenMode.READ)
     total = project.total_items
     with progress(message="Evaluation: Converting to COCO format", total=total) as pbar:
         img_id = 1
-        for dataset_name in datasets:
-            ann_path = pjoin(sly_project_path, dataset_name, "ann")
-            imginfo_path = pjoin(sly_project_path, dataset_name, "img_info")
+        for dataset in sorted(project.datasets, key=lambda x: x.name):
+            dataset: Dataset
+            ann_path = dataset.ann_dir
+            imginfo_path = dataset.img_info_dir
             ann_files = sorted(os.listdir(ann_path))
             for batch in batched(ann_files, 30):
                 for ann_file in batch:
@@ -75,7 +71,7 @@ def sly2coco(
                         "width": img_w,
                         "height": img_h,
                         "sly_id": img_info["id"],
-                        "dataset": dataset_name,
+                        "dataset": dataset.name,
                     }
                     images.append(img)
                     for label in ann["objects"]:
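The converter no longer discovers datasets by listing top-level directories, which missed nested datasets; it now iterates `project.datasets` from the SDK's `Project` reader. A rough stand-alone illustration of why the old listing breaks for nested layouts (plain `os` calls; the directory names and the `datasets/` nesting convention here are hypothetical):

```python
import os
import tempfile

# A nested dataset layout: ds1 contains a child dataset ds1_child.
root = tempfile.mkdtemp()
for d in ("ds1/ann", "ds1/img", "ds1/datasets/ds1_child/ann", "ds1/datasets/ds1_child/img"):
    os.makedirs(os.path.join(root, d))

# The old approach: only top-level directories are seen.
top_level_only = [n for n in os.listdir(root) if os.path.isdir(os.path.join(root, n))]
print(top_level_only)  # ['ds1'] -- ds1_child is invisible

# A recursive walk (what iterating project.datasets effectively gives you) finds both.
nested = [dirpath for dirpath, dirs, _ in os.walk(root) if "ann" in dirs]
print(nested)  # .../ds1 and .../ds1/datasets/ds1_child
```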
--- a/supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py
+++ b/supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py
@@ -1,22 +1,28 @@
 import os
+
 from supervisely.io.json import dump_json_file
-from supervisely.nn.benchmark.evaluation import BaseEvaluator
 from supervisely.nn.benchmark.coco_utils import read_coco_datasets, sly2coco
+from supervisely.nn.benchmark.evaluation import BaseEvaluator
 from supervisely.nn.benchmark.evaluation.coco import calculate_metrics
 
 
 class InstanceSegmentationEvaluator(BaseEvaluator):
     def evaluate(self):
-        self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
+        try:
+            self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
+        except AssertionError as e:
+            raise ValueError(
+                f"{e}. Please make sure that your GT and DT projects are correct. "
+                "If the GT project has nested datasets and the DT project was created with the NN app, "
+                "try using a newer version of the NN app."
+            )
+
         self._dump_datasets()
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
         with self.pbar(message="Evaluation: Calculating metrics", total=10) as p:
             self.eval_data = calculate_metrics(
-                self.cocoGt,
-                self.cocoDt,
-                iouType="segm",
-                progress_cb=p.update
-            )
+                self.cocoGt, self.cocoDt, iouType="segm", progress_cb=p.update
+            )
         self._dump_eval_results()
 
     def _convert_to_coco(self):
@@ -41,8 +47,12 @@ class InstanceSegmentationEvaluator(BaseEvaluator):
             "Not found any predictions. "
             "Please make sure that your model produces predictions."
         )
-        assert cocoDt_json['categories'] == cocoGt_json['categories']
-        assert [x['id'] for x in cocoDt_json['images']] == [x['id'] for x in cocoGt_json['images']]
+        assert (
+            cocoDt_json["categories"] == cocoGt_json["categories"]
+        ), "Object classes in GT and DT projects are different"
+        assert [f'{x["dataset"]}/{x["file_name"]}' for x in cocoDt_json["images"]] == [
+            f'{x["dataset"]}/{x["file_name"]}' for x in cocoGt_json["images"]
+        ], "Images in GT and DT projects are different"
         return cocoGt_json, cocoDt_json
 
     def _dump_datasets(self):
--- a/supervisely/nn/benchmark/evaluation/object_detection_evaluator.py
+++ b/supervisely/nn/benchmark/evaluation/object_detection_evaluator.py
@@ -8,7 +8,14 @@ from supervisely.nn.benchmark.evaluation.coco import calculate_metrics
 
 class ObjectDetectionEvaluator(BaseEvaluator):
     def evaluate(self):
-        self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
+        try:
+            self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
+        except AssertionError as e:
+            raise ValueError(
+                f"{e}. Please make sure that your GT and DT projects are correct. "
+                "If the GT project has nested datasets and the DT project was created with the NN app, "
+                "try using a newer version of the NN app."
+            )
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
         with self.pbar(message="Evaluation: Calculating metrics", total=10) as p:
             self.eval_data = calculate_metrics(
@@ -42,7 +49,9 @@ class ObjectDetectionEvaluator(BaseEvaluator):
         assert (
             cocoDt_json["categories"] == cocoGt_json["categories"]
         ), "Classes in GT and Pred projects must be the same"
-        assert [x["id"] for x in cocoDt_json["images"]] == [x["id"] for x in cocoGt_json["images"]]
+        assert [f'{x["dataset"]}/{x["file_name"]}' for x in cocoDt_json["images"]] == [
+            f'{x["dataset"]}/{x["file_name"]}' for x in cocoGt_json["images"]
+        ], "Images in GT and DT projects are different"
         return cocoGt_json, cocoDt_json
 
     def _dump_eval_results(self):
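Both evaluators now compare GT and DT images by a dataset-qualified file name instead of by raw COCO image id, so the check still lines up when nested datasets change id ordering between the two projects. A small sketch of that comparison with toy dictionaries (not the evaluators' real data):

```python
gt_images = [
    {"id": 1, "dataset": "ds1", "file_name": "a.jpg"},
    {"id": 2, "dataset": "ds1/child", "file_name": "b.jpg"},
]
dt_images = [
    {"id": 10, "dataset": "ds1", "file_name": "a.jpg"},       # ids differ between projects...
    {"id": 11, "dataset": "ds1/child", "file_name": "b.jpg"},
]

gt_keys = [f'{x["dataset"]}/{x["file_name"]}' for x in gt_images]
dt_keys = [f'{x["dataset"]}/{x["file_name"]}' for x in dt_images]

# ...but the dataset-qualified keys still match one-to-one.
assert dt_keys == gt_keys, "Images in GT and DT projects are different"
print(gt_keys)  # ['ds1/a.jpg', 'ds1/child/b.jpg']
```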
--- a/supervisely/nn/benchmark/visualization/text_templates/instance_segmentation_text.py
+++ b/supervisely/nn/benchmark/visualization/text_templates/instance_segmentation_text.py
@@ -49,7 +49,7 @@ Learn more about Model Benchmark, implementation details, and how to use the cha
 
 markdown_key_metrics = """## Key Metrics
 
-Here, we comprehensively assess the model's performance by presenting a broad set of metrics, including mAP (mean Average Precision), Precision, Recall, IoU (Intersection over Union), Classification Accuracy, Calibration Score, and Inference Speed.
+Here, we comprehensively assess the model's performance by presenting a broad set of metrics, including mAP (mean Average Precision), Precision, Recall, IoU (Intersection over Union), Classification Accuracy and Calibration Score.
 
 - **Mean Average Precision (mAP)**: A comprehensive metric of detection and instance segmentation performance. mAP calculates the <abbr title="{}">average precision</abbr> across all classes at different levels of <abbr title="{}">IoU thresholds</abbr> and precision-recall trade-offs. In other words, it evaluates the performance of a model by considering its ability to detect and localize objects accurately across multiple IoU thresholds and object categories.
 - **Precision**: Precision indicates how often the model's predictions are actually correct when it predicts an object. This calculates the ratio of correct predictions to the total number of predictions made by the model.
@@ -57,7 +57,6 @@ Here, we comprehensively assess the model's performance by presenting a broad se
 - **Intersection over Union (IoU)**: IoU measures the overlap between two masks: one predicted by the model and one from the ground truth. It is calculated as the area of intersection between the predicted mask and the ground truth mask, divided by the area of their union. A higher IoU score indicates better alignment between the predicted and ground truth masks.
 - **Classification Accuracy**: We additionally measure the classification accuracy of an instance segmentation model. This metric represents the percentage of correctly labeled instances among all instances where the predicted segmentation masks accurately match the ground truth masks (with an IoU greater than 0.5, regardless of class).
 - **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
-- **Inference Speed**: The number of frames per second (FPS) the model can process, measured with a batch size of 1. The inference speed is important in applications, where real-time inference is required. Additionally, slower models pour more GPU resources, so their inference cost is higher.
 """
  markdown_explorer = """## Explore Predictions
--- a/supervisely/nn/benchmark/visualization/text_templates/object_detection_text.py
+++ b/supervisely/nn/benchmark/visualization/text_templates/object_detection_text.py
@@ -49,7 +49,7 @@ Learn more about Model Benchmark, implementation details, and how to use the cha
 
 markdown_key_metrics = """## Key Metrics
 
-Here, we comprehensively assess the model's performance by presenting a broad set of metrics, including mAP (mean Average Precision), Precision, Recall, IoU (Intersection over Union), Classification Accuracy, Calibration Score, and Inference Speed.
+Here, we comprehensively assess the model's performance by presenting a broad set of metrics, including mAP (mean Average Precision), Precision, Recall, IoU (Intersection over Union), Classification Accuracy and Calibration Score.
 
 - **Mean Average Precision (mAP)**: A comprehensive metric of detection performance. mAP calculates the <abbr title="{}">average precision</abbr> across all classes at different levels of <abbr title="{}">IoU thresholds</abbr> and precision-recall trade-offs. In other words, it evaluates the performance of a model by considering its ability to detect and localize objects accurately across multiple IoU thresholds and object categories.
 - **Precision**: Precision indicates how often the model's predictions are actually correct when it predicts an object. This calculates the ratio of correct detections to the total number of detections made by the model.
@@ -57,7 +57,6 @@ Here, we comprehensively assess the model's performance by presenting a broad se
 - **Intersection over Union (IoU)**: IoU measures how closely predicted bounding boxes match the actual (ground truth) bounding boxes. It is calculated as the area of overlap between the predicted bounding box and the ground truth bounding box, divided by the area of union of these bounding boxes.
 - **Classification Accuracy**: We additionally measure the classification accuracy of an object detection model. This metric represents the percentage of correctly labeled instances among all correctly localized bounding boxes (where the IoU for each box is greater than 0.5, regardless of class).
 - **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
-- **Inference Speed**: The number of frames per second (FPS) the model can process, measured with a batch size of 1. The inference speed is important in applications, where real-time object detection is required. Additionally, slower models pour more GPU resources, so their inference cost is higher.
 """
--- a/supervisely/nn/benchmark/visualization/vis_metric_base.py
+++ b/supervisely/nn/benchmark/visualization/vis_metric_base.py
@@ -291,7 +291,7 @@ class MetricVis:
                 zip(image_infos, ann_infos, project_metas)
             ):
                 image_name = image_info.name
-                image_url = image_info.full_storage_url
+                image_url = image_info.preview_url
                 is_ignore = True if idx in [0, 1] else False
                 widget.gallery.append(
                     title=image_name,
--- a/supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py
+++ b/supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py
@@ -37,7 +37,7 @@ class ExplorerGrid(MetricVis):
 
         for idx, (pred_image, ann_info) in enumerate(zip(pred_image_infos, ann_infos)):
            image_name = pred_image.name
-            image_url = pred_image.full_storage_url
+            image_url = pred_image.preview_url
            widget.gallery.append(
                title=image_name,
                image_url=image_url,
--- a/supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py
+++ b/supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py
@@ -33,7 +33,7 @@ class ModelPredictions(MetricVis):
        dt_project_id = self._loader.dt_project_info.id
 
        tmp = set()
-        for dt_dataset in self._loader._api.dataset.get_list(dt_project_id):
+        for dt_dataset in self._loader._api.dataset.get_list(dt_project_id, recursive=True):
            names = [x.name for x in self._loader._api.image.get_list(dt_dataset.id)]
            tmp.update(names)
        df = self._loader.mp.prediction_table().round(2)
--- a/supervisely/nn/benchmark/visualization/vis_metrics/precision.py
+++ b/supervisely/nn/benchmark/visualization/vis_metrics/precision.py
@@ -42,6 +42,7 @@ class Precision(MetricVis):
            y="precision",
            # title="Per-class Precision (Sorted by F1)",
            color="precision",
+            range_color=[0, 1],
            color_continuous_scale="Plasma",
        )
        fig.update_traces(hovertemplate="Class: %{x}<br>Precision: %{y:.2f}<extra></extra>")
--- a/supervisely/nn/benchmark/visualization/vis_metrics/recall.py
+++ b/supervisely/nn/benchmark/visualization/vis_metrics/recall.py
@@ -40,6 +40,7 @@ class Recall(MetricVis):
            y="recall",
            # title="Per-class Recall (Sorted by F1)",
            color="recall",
+            range_color=[0, 1],
            color_continuous_scale="Plasma",
        )
        fig.update_traces(hovertemplate="Class: %{x}<br>Recall: %{y:.2f}<extra></extra>")
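Pinning `range_color=[0, 1]` anchors the Plasma color scale to the full metric range, so bars are colored on an absolute scale instead of being rescaled to the min/max of the current chart. A minimal plotly express sketch with toy data (assumes plotly and pandas are installed):

```python
import pandas as pd
import plotly.express as px

df = pd.DataFrame({"class": ["cat", "dog", "bird"], "precision": [0.91, 0.85, 0.88]})

# Without range_color, the scale would stretch over 0.85..0.91 and exaggerate small differences.
fig = px.bar(
    df,
    x="class",
    y="precision",
    color="precision",
    range_color=[0, 1],  # absolute scale: 0.85 and 0.91 now look similarly "good"
    color_continuous_scale="Plasma",
)
fig.update_traces(hovertemplate="Class: %{x}<br>Precision: %{y:.2f}<extra></extra>")
# fig.show()
```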
--- a/supervisely/nn/benchmark/visualization/visualizer.py
+++ b/supervisely/nn/benchmark/visualization/visualizer.py
@@ -361,8 +361,19 @@ class Visualizer:
 
         gt_project = Project(gt_project_path, OpenMode.READ)
         pred_project = Project(pred_project_path, OpenMode.READ)
+        diff_dataset_id_to_info = {
+            ds.id: ds
+            for ds in self._api.dataset.get_list(self.diff_project_info.id, recursive=True)
+        }
+
+        def _get_full_name(ds_id: int):
+            ds_info = diff_dataset_id_to_info[ds_id]
+            if ds_info.parent_id is None:
+                return ds_info.name
+            return f"{_get_full_name(ds_info.parent_id)}/{ds_info.name}"
+
         diff_dataset_name_to_info = {
-            ds.name: ds for ds in self._api.dataset.get_list(self.diff_project_info.id)
+            _get_full_name(ds_id): ds_info for ds_id, ds_info in diff_dataset_id_to_info.items()
         }
 
         matched_id_map = self._get_matched_id_map()  # dt_id -> gt_id
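The diff project's datasets are now keyed by their full nested path (e.g. `parent/child`) rather than the bare name, since nested datasets can repeat short names across branches. A compact sketch of the recursive name resolution, with a stand-in record instead of the SDK's `DatasetInfo` (all names here are illustrative):

```python
from typing import Dict, NamedTuple, Optional


class DsInfo(NamedTuple):
    id: int
    name: str
    parent_id: Optional[int]


infos: Dict[int, DsInfo] = {
    1: DsInfo(1, "train", None),
    2: DsInfo(2, "batch_01", 1),
    3: DsInfo(3, "batch_01", None),  # same short name as id=2, different branch
}


def full_name(ds_id: int) -> str:
    info = infos[ds_id]
    if info.parent_id is None:
        return info.name
    return f"{full_name(info.parent_id)}/{info.name}"


name_to_info = {full_name(i): info for i, info in infos.items()}
print(sorted(name_to_info))  # ['batch_01', 'train', 'train/batch_01'] -- no collision
```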
@@ -378,6 +389,13 @@ class Visualizer:
         with self.pbar(
             message="Visualizations: Creating diff_project", total=pred_project.total_items
         ) as progress:
+            logger.debug(
+                "Creating diff project data",
+                extra={
+                    "pred_project": [ds.name for ds in pred_project.datasets],
+                    "gt_project": [ds.name for ds in gt_project.datasets],
+                },
+            )
             for pred_dataset in pred_project.datasets:
                 pred_dataset: Dataset
                 gt_dataset: Dataset = gt_project.datasets.get(pred_dataset.name)
@@ -460,8 +478,19 @@ class Visualizer:
         gt_project_path, pred_project_path = self._benchmark._download_projects(save_images=False)
         gt_project = Project(gt_project_path, OpenMode.READ)
         pred_project = Project(pred_project_path, OpenMode.READ)
+        diff_dataset_id_to_info = {
+            ds.id: ds
+            for ds in self._api.dataset.get_list(self.diff_project_info.id, recursive=True)
+        }
+
+        def _get_full_name(ds_id: int):
+            ds_info = diff_dataset_id_to_info[ds_id]
+            if ds_info.parent_id is None:
+                return ds_info.name
+            return f"{_get_full_name(ds_info.parent_id)}/{ds_info.name}"
+
         diff_dataset_name_to_info = {
-            ds.name: ds for ds in self._api.dataset.get_list(self.diff_project_info.id)
+            _get_full_name(ds_id): ds_info for ds_id, ds_info in diff_dataset_id_to_info.items()
         }
 
         for pred_dataset in pred_project.datasets:
--- a/supervisely/nn/inference/inference.py
+++ b/supervisely/nn/inference/inference.py
@@ -1498,9 +1498,27 @@ class Inference:
            if src_dataset_id in new_dataset_id:
                return new_dataset_id[src_dataset_id]
            dataset_info = api.dataset.get_info_by_id(src_dataset_id)
-            output_dataset_id = api.dataset.copy(
-                output_project_id, src_dataset_id, dataset_info.name, change_name_if_conflict=True
-            ).id
+            if dataset_info.parent_id is None:
+                output_dataset_id = api.dataset.copy(
+                    output_project_id,
+                    src_dataset_id,
+                    dataset_info.name,
+                    change_name_if_conflict=True,
+                ).id
+            else:
+                parent_dataset_id = _get_or_create_new_dataset(
+                    output_project_id, dataset_info.parent_id
+                )
+                output_dataset_info = api.dataset.create(
+                    output_project_id, dataset_info.name, parent_id=parent_dataset_id
+                )
+                api.image.copy_batch_optimized(
+                    dataset_info.id,
+                    images_infos_dict[dataset_info.id],
+                    output_dataset_info.id,
+                    with_annotations=False,
+                )
+                output_dataset_id = output_dataset_info.id
            new_dataset_id[src_dataset_id] = output_dataset_id
            return output_dataset_id
 
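For nested sources the inference upload path no longer relies on `api.dataset.copy` alone: it first resolves (or creates) the parent dataset in the output project, creates the child under it, and then copies the images in bulk without annotations. A condensed sketch of that branch as a self-contained function, using only the calls visible in this diff; `api`, `images_infos_dict`, and the `new_dataset_id` map are assumed to exist as in the surrounding method:

```python
def get_or_create_output_dataset(api, src_dataset_id, output_project_id,
                                 new_dataset_id, images_infos_dict):
    """Mirror one source dataset (and, recursively, its parents) into the output project."""
    if src_dataset_id in new_dataset_id:
        return new_dataset_id[src_dataset_id]
    info = api.dataset.get_info_by_id(src_dataset_id)
    if info.parent_id is None:
        # Top-level dataset: a plain copy keeps the previous behaviour.
        out_id = api.dataset.copy(
            output_project_id, src_dataset_id, info.name, change_name_if_conflict=True
        ).id
    else:
        # Nested dataset: make sure the parent exists first, then copy images only.
        parent_out_id = get_or_create_output_dataset(
            api, info.parent_id, output_project_id, new_dataset_id, images_infos_dict
        )
        out_info = api.dataset.create(output_project_id, info.name, parent_id=parent_out_id)
        api.image.copy_batch_optimized(
            info.id, images_infos_dict[info.id], out_info.id, with_annotations=False
        )
        out_id = out_info.id
    new_dataset_id[src_dataset_id] = out_id
    return out_id
```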
@@ -1775,7 +1793,9 @@ class Inference:
        stop = False
 
        def image_batch_generator(batch_size):
-            logger.debug(f"image_batch_generator. images_infos={len(images_infos)}, batch_size={batch_size}")
+            logger.debug(
+                f"image_batch_generator. images_infos={len(images_infos)}, batch_size={batch_size}"
+            )
            batch = []
            while True:
                for image_info in images_infos:
--- a/supervisely-6.73.200.dist-info/METADATA
+++ b/supervisely-6.73.201.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.200
+Version: 6.73.201
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely
--- a/supervisely-6.73.200.dist-info/RECORD
+++ b/supervisely-6.73.201.dist-info/RECORD
@@ -718,28 +718,28 @@ supervisely/nn/artifacts/unet.py,sha256=Gn8ADfwC4F-MABVDPRY7g_ZaAIaaOAEbhhIGII-o
 supervisely/nn/artifacts/yolov5.py,sha256=6KDCyDlLO7AT9of1qHjCaG5mmxCv6C0p-zCk9KJ0PH4,1478
 supervisely/nn/artifacts/yolov8.py,sha256=c3MzbOTYD6RT5N4F9oZ0SWXxyonjJ6ZQfZLYUHPRZg4,1204
 supervisely/nn/benchmark/__init__.py,sha256=RxqbBx7cbzookq2DRvxYIaRofON9uxHeY5h8DqDbZq0,187
-supervisely/nn/benchmark/base_benchmark.py,sha256=6XfsXt9ERk2-l6Zkq2OleITzAxLwtRALIxAjmQptikU,21332
+supervisely/nn/benchmark/base_benchmark.py,sha256=f0TlgPdtl5-hWe38k1q4Jhld48kiDIhhWmGGJoO-FGA,22366
 supervisely/nn/benchmark/cv_tasks.py,sha256=ShoAbuNzfMYj0Se-KOnl_-dJnrmvN6Aukxa0eq28bFw,239
 supervisely/nn/benchmark/instance_segmentation_benchmark.py,sha256=9iiWEH7KDw7ps0mQQdzIrCtCKg4umHekF3ws7jIGjmE,938
 supervisely/nn/benchmark/object_detection_benchmark.py,sha256=s1S-L952etgz-UsDPyg69AgmFfAoJXvFHhITT8zB5iw,956
 supervisely/nn/benchmark/utils.py,sha256=evcoUFPkeEW1-GvAPYp8EoOv3WAsqzaSmCY4lbLLfAQ,607
 supervisely/nn/benchmark/coco_utils/__init__.py,sha256=MKxuzzBWpRCwR8kOb5NXUK8vD-2mroJn48xd6tv9FeI,139
-supervisely/nn/benchmark/coco_utils/sly2coco.py,sha256=PqOceub50i7l2uLIN7xyAHVcSBV08CPMNgfQEH9EH64,6995
+supervisely/nn/benchmark/coco_utils/sly2coco.py,sha256=iudlcHNynthscH-V5qwCLk6VgIcxYrMEuAfGIjrOjZ0,6867
 supervisely/nn/benchmark/coco_utils/utils.py,sha256=J9kM_Cn4XxfsrSQ8Rx6eb1UsS65-wOybaCkI9rQDeiU,504
 supervisely/nn/benchmark/evaluation/__init__.py,sha256=1NGV_xEGe9lyPdE5gJ8AASKzm2WyZ_jKlh9WVvCQIaY,287
 supervisely/nn/benchmark/evaluation/base_evaluator.py,sha256=Ac1EsvRrMH-Fck1aVS9T2Tx1m9PfialRA3z8XJs5e8U,1039
-supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py,sha256=qdWPBK8VuenOFM-iK6lqvMAr0AoUxTz8MOUGTAtdJBM,2624
-supervisely/nn/benchmark/evaluation/object_detection_evaluator.py,sha256=qVVCXkCEbff3YfLif6b18ldbBA3w2bwwdbYhC7HZjQI,2494
+supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py,sha256=oskpLBSwo_u224m_fc-oVJD0GGWgsyR9HrBVEEQ-FAE,3101
+supervisely/nn/benchmark/evaluation/object_detection_evaluator.py,sha256=5XrTnNpgdZgJ-LgXdUd74OvZLTQEenltxTSO12bTwqg,2943
 supervisely/nn/benchmark/evaluation/coco/__init__.py,sha256=l6dFxp9aenywosQzQkIaDEI1p-DDQ63OgJJXxSVB4Mk,172
 supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py,sha256=tgUAXngl0QcGpSdGvZRVo6f_0YP_PF4Leu2fpx5a_Us,10702
 supervisely/nn/benchmark/evaluation/coco/metric_provider.py,sha256=j4YMk20t3lsX3QnsSIRjEYx8EayHw77I4KdXxKfgxeI,17513
 supervisely/nn/benchmark/evaluation/coco/metrics.py,sha256=oyictdJ7rRDUkaVvHoxntywW5zZweS8pIJ1bN6JgXtE,2420
 supervisely/nn/benchmark/visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/vis_click_data.py,sha256=4QdBZqJmmPYdcB7x565zOtXhDFRyXIB4tpu0V-_otoc,3724
-supervisely/nn/benchmark/visualization/vis_metric_base.py,sha256=Lu-wIOCxn4_7kJIS3HP-dUmF0OTOth9J1Y9onl3Rm64,13864
+supervisely/nn/benchmark/visualization/vis_metric_base.py,sha256=NJBduyKE0UPihPtM2BR2eOdDwkUWZ3lMBpZGy9UFOZ0,13859
 supervisely/nn/benchmark/visualization/vis_templates.py,sha256=tDPQcuByvnDdfGdDaT-KhemnKCtieunp-MgnGAPsbrQ,9905
 supervisely/nn/benchmark/visualization/vis_widgets.py,sha256=CsT7DSfxH4g4zHsmm_7RCJf3YR6zXiADuYhUIIGdn7w,4073
-supervisely/nn/benchmark/visualization/visualizer.py,sha256=q0vsO_LpmTM1o5VH_Ii5nOOyh44C3owyuVXDV9tiPGE,30616
+supervisely/nn/benchmark/visualization/visualizer.py,sha256=BLu31ETO202AgpT1gjiAry-m_hk3ExzyFUsQtOHaeqU,31729
 supervisely/nn/benchmark/visualization/inference_speed/__init__.py,sha256=6Nahwt9R61_Jc1eWupXa70CgyRQ7tbUeiDWR26017rY,554
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py,sha256=73gbXs1uTfxxWH-UCJdR72m-48jMD5qVyMyolf5jNoc,6140
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py,sha256=ivUVriKyhx9ZtwVSqrAkUqq1SJGYYxNLwLQR1UgE4aM,900
@@ -747,18 +747,18 @@ supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py,sha
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py,sha256=bVpNS3YBP0TGsqE_XQBuFMJI5ybDM0RZpEzFyT7cbkA,2157
 supervisely/nn/benchmark/visualization/text_templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py,sha256=XGeBrbP-ROyKYbqYZzA281_IG45Ygu9NKyqG2I3o5TU,1124
-supervisely/nn/benchmark/visualization/text_templates/instance_segmentation_text.py,sha256=Ju6x5d44yEBuQtmX9zpUdA5GPd0THKmxanhszzz6hQ0,25428
-supervisely/nn/benchmark/visualization/text_templates/object_detection_text.py,sha256=5SWOYgow2PNBuEg-luGEHEKemXMS5IfEY-gjZJoxiCw,24827
+supervisely/nn/benchmark/visualization/text_templates/instance_segmentation_text.py,sha256=ud_XqN3q8nbpAvk3JvW_8LNcmnkq7B-MuLgILgSVeJA,25116
+supervisely/nn/benchmark/visualization/text_templates/object_detection_text.py,sha256=jKHeKZGwEP1rnKpKHpf9x5iP9L8JaFMkvnFcayxvgeI,24508
 supervisely/nn/benchmark/visualization/vis_metrics/__init__.py,sha256=Qrd9NMgFUQ1nbEy4NEu59RXR4OmVaBdK_iLOGpwVRCA,2249
 supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py,sha256=8S_XYSA_qyVWAEnvebdjuw9ktJScDHgTVvZRfh-bvpc,1841
 supervisely/nn/benchmark/visualization/vis_metrics/confidence_distribution.py,sha256=m-z0-jPn3dd_X-w49Zjkb4qEEP6Rw6fVpSjgSuJoeRw,3794
 supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py,sha256=F38WSVF2a2ePV7DWfYdxDlSjM68NvjPMccfaTQWg0uI,3356
 supervisely/nn/benchmark/visualization/vis_metrics/confusion_matrix.py,sha256=SFDRpoJX-IJYv-SoCXOyhXZTpVUm6G1IYCe15Nei9Uc,3362
-supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py,sha256=gnPUYOkVcBMcUDb5ElmtHXRzWj5cXuodc8KoNB4DO7I,5743
+supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py,sha256=-TmGMWr6bHZd80TBGk3S_qdDQ-xlAL-zczAYw8h9vtU,5738
 supervisely/nn/benchmark/visualization/vis_metrics/f1_score_at_different_iou.py,sha256=QbHUT0-beBs0z8anbma1MS6iNEG89CmL6iSj8-ejnlc,3158
 supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py,sha256=2lhwqaXdIEXhFSaqBYcJjRLp_OgBTEYxajYib0v4qRQ,3992
 supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py,sha256=Kir2F3piCBf-xKhoQzVXbo38zyrLLhOr3bYay90H1-g,3015
-supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py,sha256=kVhKVfH_OvnK7vyxoogZaHTrsztT2qWaqdLcq2IX6ew,6118
+supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py,sha256=38_dJAzAfJNsfFOg_bqE3B-btrk1K_sQOpwk26Y64Nk,6134
 supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py,sha256=rsm_hdE0pYCHY-5v0pjDIid71y2tPbzYbmH2Qw-RS-4,3983
 supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py,sha256=lSb2-jfplyERIUCi8_6P9aq6C77JGOKOJK20J824sEE,5623
 supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py,sha256=YHfueea2EkUgNGP4FCyKyCaCtCwaYeYNJ3WwfF-Hzi4,3553
@@ -766,14 +766,14 @@ supervisely/nn/benchmark/visualization/vis_metrics/overview.py,sha256=_TwrBnkEfw
 supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py,sha256=mm8IVM90EoIC_9GsiM-Jyhh6jPqQcHMo788VAvRAzMY,1877
 supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py,sha256=4-AwEQk1ywuW4zXO_EXo7_aFMjenwhnLlGX2PWqiu0k,3574
 supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py,sha256=9Uuibo38HVGPChPbCW8i3cMYdb6-NFlys1TBisp5zOU,1442
-supervisely/nn/benchmark/visualization/vis_metrics/precision.py,sha256=UBMz9hn0REVf1e0XGnBpFBE6oGxNEv3zbVB3KLGIwos,2124
-supervisely/nn/benchmark/visualization/vis_metrics/recall.py,sha256=y_xwTQkmmIvBXzapmeZFZ3LdAeWjZEdj3-3sRnWvHrA,2046
+supervisely/nn/benchmark/visualization/vis_metrics/precision.py,sha256=_ATnCkson-tSOv3xp7LI7BmwzIax75zuKs7VoeBZ_ds,2156
+supervisely/nn/benchmark/visualization/vis_metrics/recall.py,sha256=trRMw8ziWMaDp5cmJuwFaJ6aO_rfsTbCYb9LMdr9D_Q,2078
 supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py,sha256=RuN3tQA3Zt5MmCha8T0WgWDIvzURjsqfL4ap_LYqN-Y,1859
 supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py,sha256=Vxta2s0RTTcV0GCcMiF8CykCtZYryLTwGjW9vVUrK3I,3107
 supervisely/nn/benchmark/visualization/vis_metrics/what_is.py,sha256=MDnYR-o7Mj-YE1Jwu9EcLUEPcu6rLknRx7LvV4nnUBo,842
 supervisely/nn/inference/__init__.py,sha256=mtEci4Puu-fRXDnGn8RP47o97rv3VTE0hjbYO34Zwqg,1622
 supervisely/nn/inference/cache.py,sha256=vjFYIkoV-txzAl_C_WKvS5odccBU8GHFY8iTxxnSqLU,25619
-supervisely/nn/inference/inference.py,sha256=DwvyF6kKR68kBUPh3W1Vj0DDJIh0lmNxB9eLiCsZC48,113744
+supervisely/nn/inference/inference.py,sha256=7N6fMqOd18VQoTCaP4slc_cuL5ovc43FIpWomv6PtEc,114524
 supervisely/nn/inference/session.py,sha256=jmkkxbe2kH-lEgUU6Afh62jP68dxfhF5v6OGDfLU62E,35757
 supervisely/nn/inference/video_inference.py,sha256=8Bshjr6rDyLay5Za8IB8Dr6FURMO2R_v7aELasO8pR4,5746
 supervisely/nn/inference/gui/__init__.py,sha256=e3RKi93bI1r_0Dkvs_gaR1p_jkzkBMNjrcx-RVlm93k,88
@@ -953,9 +953,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.200.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.200.dist-info/METADATA,sha256=7nxrYfel8T-VAi0k_ist_K7npAJifyIQe2Wwv9H0ip0,33077
-supervisely-6.73.200.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-supervisely-6.73.200.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.200.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.200.dist-info/RECORD,,
+supervisely-6.73.201.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.201.dist-info/METADATA,sha256=ZY58o7Meo4VBsGD1N9EDwkQ9cO9c5ECQoopB--t71WY,33077
+supervisely-6.73.201.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+supervisely-6.73.201.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.201.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.201.dist-info/RECORD,,