supervisely 6.73.200__py3-none-any.whl → 6.73.202__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of supervisely has been flagged as potentially problematic.
- supervisely/api/file_api.py +35 -1
- supervisely/nn/benchmark/base_benchmark.py +28 -2
- supervisely/nn/benchmark/coco_utils/sly2coco.py +7 -11
- supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +19 -9
- supervisely/nn/benchmark/evaluation/object_detection_evaluator.py +11 -2
- supervisely/nn/benchmark/visualization/text_templates/instance_segmentation_text.py +1 -2
- supervisely/nn/benchmark/visualization/text_templates/object_detection_text.py +1 -2
- supervisely/nn/benchmark/visualization/vis_metric_base.py +1 -1
- supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py +1 -1
- supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py +1 -1
- supervisely/nn/benchmark/visualization/vis_metrics/precision.py +1 -0
- supervisely/nn/benchmark/visualization/vis_metrics/recall.py +1 -0
- supervisely/nn/benchmark/visualization/visualizer.py +31 -2
- supervisely/nn/inference/inference.py +24 -4
- {supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/METADATA +1 -1
- {supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/RECORD +20 -20
- {supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/LICENSE +0 -0
- {supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/WHEEL +0 -0
- {supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/top_level.txt +0 -0
supervisely/api/file_api.py
CHANGED

@@ -806,6 +806,40 @@ class FileApi(ModuleApiBase):
             api.file.upload_bulk(8, src_paths, dst_remote_paths)
         """

+        def _group_files_generator(
+            src_paths: List[str], dst_paths: List[str], limit: int = 20 * 1024 * 1024
+        ):
+            if limit is None:
+                return src_paths, dst_paths
+            group_src = []
+            group_dst = []
+            total_size = 0
+            for src, dst in zip(src_paths, dst_paths):
+                size = os.path.getsize(src)
+                if total_size > 0 and total_size + size > limit:
+                    yield group_src, group_dst
+                    group_src = []
+                    group_dst = []
+                    total_size = 0
+                group_src.append(src)
+                group_dst.append(dst)
+                total_size += size
+            if total_size > 0:
+                yield group_src, group_dst
+
+        file_infos = []
+        for src, dst in _group_files_generator(src_paths, dst_paths):
+            file_infos.extend(self._upload_bulk(team_id, src, dst, progress_cb))
+        return file_infos
+
+    def _upload_bulk(
+        self,
+        team_id: int,
+        src_paths: List[str],
+        dst_paths: List[str],
+        progress_cb: Optional[Union[tqdm, Callable]] = None,
+    ) -> List[FileInfo]:
+
         def path_to_bytes_stream(path):
             return open(path, "rb")

@@ -1338,7 +1372,7 @@ class FileApi(ModuleApiBase):
         """
         if not remote_dir.startswith("/"):
             remote_dir = "/" + remote_dir
-
+
         if self.dir_exists(team_id, remote_dir):
             if change_name_if_conflict is True:
                 res_remote_dir = self.get_free_dir_name(team_id, remote_dir)
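The new `_group_files_generator` caps each upload request by cumulative payload size (20 MiB by default) rather than by file count. A minimal standalone sketch of the same batching logic, under the hypothetical name `group_by_size`:

```python
import os
from typing import Iterator, List, Tuple


def group_by_size(
    src_paths: List[str], dst_paths: List[str], limit: int = 20 * 1024 * 1024
) -> Iterator[Tuple[List[str], List[str]]]:
    """Yield (src, dst) batches whose cumulative size stays under `limit` bytes."""
    group_src: List[str] = []
    group_dst: List[str] = []
    total_size = 0
    for src, dst in zip(src_paths, dst_paths):
        size = os.path.getsize(src)
        # Flush the current batch before it would exceed the limit;
        # a single oversized file still goes through as its own batch.
        if total_size > 0 and total_size + size > limit:
            yield group_src, group_dst
            group_src, group_dst, total_size = [], [], 0
        group_src.append(src)
        group_dst.append(dst)
        total_size += size
    if total_size > 0:
        yield group_src, group_dst
```

Note that the `total_size > 0` guard means a file larger than the limit is still emitted, alone, rather than dropped.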
supervisely/nn/benchmark/base_benchmark.py
CHANGED

@@ -449,6 +449,28 @@ class BaseBenchmark:
         vis.visualize()

     def _get_or_create_diff_project(self) -> Tuple[ProjectInfo, bool]:
+
+        dt_ds_id_to_diff_ds_info = {}
+
+        def _get_or_create_diff_dataset(dt_dataset_id, dt_datasets):
+            if dt_dataset_id in dt_ds_id_to_diff_ds_info:
+                return dt_ds_id_to_diff_ds_info[dt_dataset_id]
+            dt_dataset = dt_datasets[dt_dataset_id]
+            if dt_dataset.parent_id is None:
+                diff_dataset = self.api.dataset.create(
+                    diff_project_info.id,
+                    dt_dataset.name,
+                )
+            else:
+                parent_dataset = _get_or_create_diff_dataset(dt_dataset.parent_id, dt_datasets)
+                diff_dataset = self.api.dataset.create(
+                    diff_project_info.id,
+                    dt_dataset.name,
+                    parent_id=parent_dataset.id,
+                )
+            dt_ds_id_to_diff_ds_info[dt_dataset_id] = diff_dataset
+            return diff_dataset
+
         diff_project_name = self._generate_diff_project_name(self.dt_project_info.name)
         diff_workspace_id = self.dt_project_info.workspace_id
         diff_project_info = self.api.project.get_info_by_name(
@@ -460,8 +482,12 @@ class BaseBenchmark:
             diff_project_info = self.api.project.create(
                 diff_workspace_id, diff_project_name, change_name_if_conflict=True
             )
-
-
+            dt_datasets = {
+                ds.id: ds
+                for ds in self.api.dataset.get_list(self.dt_project_info.id, recursive=True)
+            }
+            for dataset in dt_datasets:
+                _get_or_create_diff_dataset(dataset, dt_datasets)
         return diff_project_info, is_existed

     def upload_visualizations(self, dest_dir: str):
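The hunk above mirrors the prediction project's dataset hierarchy into the diff project: the memoized helper creates each parent dataset before its children, so nested datasets land under the right parent regardless of iteration order. A sketch of the same pattern against a generic tree; `DatasetNode` and the `create` callable are stand-ins for Supervisely's dataset info and `api.dataset.create`:

```python
from typing import Callable, Dict, Optional


class DatasetNode:
    """Stand-in for a Supervisely DatasetInfo: id, name, optional parent."""

    def __init__(self, ds_id: int, name: str, parent_id: Optional[int] = None):
        self.id, self.name, self.parent_id = ds_id, name, parent_id


def mirror_dataset_tree(
    src: Dict[int, DatasetNode],
    create: Callable[[str, Optional[int]], DatasetNode],
) -> Dict[int, DatasetNode]:
    """Recreate every dataset in `src` so parents are always created first."""
    created: Dict[int, DatasetNode] = {}

    def _get_or_create(ds_id: int) -> DatasetNode:
        if ds_id in created:  # memoized: each source dataset is created exactly once
            return created[ds_id]
        ds = src[ds_id]
        parent = _get_or_create(ds.parent_id) if ds.parent_id is not None else None
        created[ds_id] = create(ds.name, parent.id if parent else None)
        return created[ds_id]

    for ds_id in src:  # order-independent thanks to the memo dict
        _get_or_create(ds_id)
    return created
```

With Supervisely this would be wired up with something like `create=lambda name, parent_id: api.dataset.create(project_id, name, parent_id=parent_id)` (an assumption based on the calls visible in the hunk).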
supervisely/nn/benchmark/coco_utils/sly2coco.py
CHANGED

@@ -7,7 +7,7 @@ import numpy as np

 from supervisely import Bitmap
 from supervisely._utils import batched
-from supervisely.project.project import
+from supervisely.project.project import Dataset, OpenMode, Project


 def sly2coco(
@@ -20,11 +20,7 @@ def sly2coco(
 ):
     from pycocotools import mask as maskUtils  # pylint: disable=import-error

-
-        name
-        for name in os.listdir(sly_project_path)
-        if os.path.isdir(pjoin(sly_project_path, name))
-    ]
+    project = Project(sly_project_path, mode=OpenMode.READ)

     # Categories
     meta_path = pjoin(sly_project_path, "meta.json")
@@ -52,13 +48,13 @@ def sly2coco(
     images = []
     annotations = []
     annotation_id = 1
-    project = Project(sly_project_path, mode=OpenMode.READ)
     total = project.total_items
     with progress(message="Evaluation: Converting to COCO format", total=total) as pbar:
         img_id = 1
-        for
-
-
+        for dataset in sorted(project.datasets, key=lambda x: x.name):
+            dataset: Dataset
+            ann_path = dataset.ann_dir
+            imginfo_path = dataset.img_info_dir
             ann_files = sorted(os.listdir(ann_path))
             for batch in batched(ann_files, 30):
                 for ann_file in batch:
@@ -75,7 +71,7 @@ def sly2coco(
                         "width": img_w,
                         "height": img_h,
                         "sly_id": img_info["id"],
-                        "dataset":
+                        "dataset": dataset.name,
                     }
                     images.append(img)
                     for label in ann["objects"]:
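Replacing the raw directory walk with `Project(..., OpenMode.READ)` and sorting datasets by name makes the enumeration order deterministic, so the GT and DT conversions emit their COCO `images` lists in the same order; the evaluators' equality asserts below depend on exactly that alignment. A rough sketch of the idea, assuming the standard Supervisely project layout with one `ann/` folder per dataset directory:

```python
import os
from os.path import join as pjoin
from typing import List


def enumerate_items(project_path: str) -> List[str]:
    """List 'dataset/ann_file' pairs in a stable, fully sorted order."""
    dataset_dirs = sorted(
        d for d in os.listdir(project_path) if os.path.isdir(pjoin(project_path, d))
    )
    items = []
    for dataset in dataset_dirs:
        for ann_file in sorted(os.listdir(pjoin(project_path, dataset, "ann"))):
            items.append(f"{dataset}/{ann_file}")
    return items


# Running this over the GT and DT project copies yields index-aligned
# lists whenever both projects contain the same images.
```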
supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py
CHANGED

@@ -1,22 +1,28 @@
 import os
+
 from supervisely.io.json import dump_json_file
-from supervisely.nn.benchmark.evaluation import BaseEvaluator
 from supervisely.nn.benchmark.coco_utils import read_coco_datasets, sly2coco
+from supervisely.nn.benchmark.evaluation import BaseEvaluator
 from supervisely.nn.benchmark.evaluation.coco import calculate_metrics


 class InstanceSegmentationEvaluator(BaseEvaluator):
     def evaluate(self):
-
+        try:
+            self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
+        except AssertionError as e:
+            raise ValueError(
+                f"{e}. Please make sure that your GT and DT projects are correct. "
+                "If GT project has nested datasets and DT project was crated with NN app, "
+                "try to use newer version of NN app."
+            )
+
         self._dump_datasets()
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
         with self.pbar(message="Evaluation: Calculating metrics", total=10) as p:
             self.eval_data = calculate_metrics(
-                self.cocoGt,
-
-                iouType="segm",
-                progress_cb=p.update
-            )
+                self.cocoGt, self.cocoDt, iouType="segm", progress_cb=p.update
+            )
         self._dump_eval_results()

     def _convert_to_coco(self):
@@ -41,8 +47,12 @@ class InstanceSegmentationEvaluator(BaseEvaluator):
             "Not found any predictions. "
             "Please make sure that your model produces predictions."
         )
-        assert
-
+        assert (
+            cocoDt_json["categories"] == cocoGt_json["categories"]
+        ), "Object classes in GT and DT projects are different"
+        assert [f'{x["dataset"]}/{x["file_name"]}' for x in cocoDt_json["images"]] == [
+            f'{x["dataset"]}/{x["file_name"]}' for x in cocoGt_json["images"]
+        ], "Images in GT and DT projects are different"
         return cocoGt_json, cocoDt_json

     def _dump_datasets(self):
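Both evaluators now trap `AssertionError` from the COCO conversion and re-raise it as a `ValueError` with remediation advice, so users see an actionable message instead of a bare assertion. The pattern in isolation (a sketch; `convert` stands in for a method like `_convert_to_coco`):

```python
def convert_or_explain(convert):
    """Run a conversion step, turning assertion failures into actionable errors."""
    try:
        return convert()
    except AssertionError as e:
        # Preserve the original assertion message, then add user-facing context.
        raise ValueError(
            f"{e}. Please make sure that your GT and DT projects are correct."
        ) from e
```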
supervisely/nn/benchmark/evaluation/object_detection_evaluator.py
CHANGED

@@ -8,7 +8,14 @@ from supervisely.nn.benchmark.evaluation.coco import calculate_metrics

 class ObjectDetectionEvaluator(BaseEvaluator):
     def evaluate(self):
-
+        try:
+            self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
+        except AssertionError as e:
+            raise ValueError(
+                f"{e}. Please make sure that your GT and DT projects are correct. "
+                "If GT project has nested datasets and DT project was crated with NN app, "
+                "try to use newer version of NN app."
+            )
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
         with self.pbar(message="Evaluation: Calculating metrics", total=10) as p:
             self.eval_data = calculate_metrics(
@@ -42,7 +49,9 @@ class ObjectDetectionEvaluator(BaseEvaluator):
         assert (
             cocoDt_json["categories"] == cocoGt_json["categories"]
         ), "Classes in GT and Pred projects must be the same"
-        assert [x["
+        assert [f'{x["dataset"]}/{x["file_name"]}' for x in cocoDt_json["images"]] == [
+            f'{x["dataset"]}/{x["file_name"]}' for x in cocoGt_json["images"]
+        ], "Images in GT and DT projects are different"
         return cocoGt_json, cocoDt_json

     def _dump_eval_results(self):
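The image-list assert now compares dataset-qualified names, so files that share a name across (possibly nested) datasets no longer alias each other. A compact sketch with made-up COCO dicts:

```python
from typing import Dict, List


def qualified_names(coco_json: Dict) -> List[str]:
    """'dataset/file_name' keys disambiguate duplicate file names across datasets."""
    return [f'{img["dataset"]}/{img["file_name"]}' for img in coco_json["images"]]


gt = {"images": [{"dataset": "train/batch1", "file_name": "0001.jpg"}]}
dt = {"images": [{"dataset": "train/batch1", "file_name": "0001.jpg"}]}
assert qualified_names(dt) == qualified_names(gt), "Images in GT and DT projects are different"
```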
supervisely/nn/benchmark/visualization/text_templates/instance_segmentation_text.py
CHANGED

@@ -49,7 +49,7 @@ Learn more about Model Benchmark, implementation details, and how to use the cha

 markdown_key_metrics = """## Key Metrics

-Here, we comprehensively assess the model's performance by presenting a broad set of metrics, including mAP (mean Average Precision), Precision, Recall, IoU (Intersection over Union), Classification Accuracy
+Here, we comprehensively assess the model's performance by presenting a broad set of metrics, including mAP (mean Average Precision), Precision, Recall, IoU (Intersection over Union), Classification Accuracy and Calibration Score.

 - **Mean Average Precision (mAP)**: A comprehensive metric of detection and instance segmentation performance. mAP calculates the <abbr title="{}">average precision</abbr> across all classes at different levels of <abbr title="{}">IoU thresholds</abbr> and precision-recall trade-offs. In other words, it evaluates the performance of a model by considering its ability to detect and localize objects accurately across multiple IoU thresholds and object categories.
 - **Precision**: Precision indicates how often the model's predictions are actually correct when it predicts an object. This calculates the ratio of correct predictions to the total number of predictions made by the model.
@@ -57,7 +57,6 @@ Here, we comprehensively assess the model's performance by presenting a broad se
 - **Intersection over Union (IoU)**: IoU measures the overlap between two masks: one predicted by the model and one from the ground truth. It is calculated as the area of intersection between the predicted mask and the ground truth mask, divided by the area of their union. A higher IoU score indicates better alignment between the predicted and ground truth masks.
 - **Classification Accuracy**: We additionally measure the classification accuracy of an instance segmentation model. This metric represents the percentage of correctly labeled instances among all instances where the predicted segmentation masks accurately match the ground truth masks (with an IoU greater than 0.5, regardless of class).
 - **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
-- **Inference Speed**: The number of frames per second (FPS) the model can process, measured with a batch size of 1. The inference speed is important in applications, where real-time inference is required. Additionally, slower models pour more GPU resources, so their inference cost is higher.
 """

 markdown_explorer = """## Explore Predictions
supervisely/nn/benchmark/visualization/text_templates/object_detection_text.py
CHANGED

@@ -49,7 +49,7 @@ Learn more about Model Benchmark, implementation details, and how to use the cha

 markdown_key_metrics = """## Key Metrics

-Here, we comprehensively assess the model's performance by presenting a broad set of metrics, including mAP (mean Average Precision), Precision, Recall, IoU (Intersection over Union), Classification Accuracy
+Here, we comprehensively assess the model's performance by presenting a broad set of metrics, including mAP (mean Average Precision), Precision, Recall, IoU (Intersection over Union), Classification Accuracy and Calibration Score.

 - **Mean Average Precision (mAP)**: A comprehensive metric of detection performance. mAP calculates the <abbr title="{}">average precision</abbr> across all classes at different levels of <abbr title="{}">IoU thresholds</abbr> and precision-recall trade-offs. In other words, it evaluates the performance of a model by considering its ability to detect and localize objects accurately across multiple IoU thresholds and object categories.
 - **Precision**: Precision indicates how often the model's predictions are actually correct when it predicts an object. This calculates the ratio of correct detections to the total number of detections made by the model.
@@ -57,7 +57,6 @@ Here, we comprehensively assess the model's performance by presenting a broad se
 - **Intersection over Union (IoU)**: IoU measures how closely predicted bounding boxes match the actual (ground truth) bounding boxes. It is calculated as the area of overlap between the predicted bounding box and the ground truth bounding box, divided by the area of union of these bounding boxes.
 - **Classification Accuracy**: We additionally measure the classification accuracy of an object detection model. This metric represents the percentage of correctly labeled instances among all correctly localized bounding boxes (where the IoU for each box is greater than 0.5, regardless of class).
 - **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
-- **Inference Speed**: The number of frames per second (FPS) the model can process, measured with a batch size of 1. The inference speed is important in applications, where real-time object detection is required. Additionally, slower models pour more GPU resources, so their inference cost is higher.
 """

 markdown_explorer = """## Explore Predictions
supervisely/nn/benchmark/visualization/vis_metric_base.py
CHANGED

@@ -291,7 +291,7 @@ class MetricVis:
             zip(image_infos, ann_infos, project_metas)
         ):
             image_name = image_info.name
-            image_url = image_info.
+            image_url = image_info.preview_url
             is_ignore = True if idx in [0, 1] else False
             widget.gallery.append(
                 title=image_name,
supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py
CHANGED

@@ -37,7 +37,7 @@ class ExplorerGrid(MetricVis):

         for idx, (pred_image, ann_info) in enumerate(zip(pred_image_infos, ann_infos)):
             image_name = pred_image.name
-            image_url = pred_image.
+            image_url = pred_image.preview_url
             widget.gallery.append(
                 title=image_name,
                 image_url=image_url,
supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py
CHANGED

@@ -33,7 +33,7 @@ class ModelPredictions(MetricVis):
         dt_project_id = self._loader.dt_project_info.id

         tmp = set()
-        for dt_dataset in self._loader._api.dataset.get_list(dt_project_id):
+        for dt_dataset in self._loader._api.dataset.get_list(dt_project_id, recursive=True):
             names = [x.name for x in self._loader._api.image.get_list(dt_dataset.id)]
             tmp.update(names)
         df = self._loader.mp.prediction_table().round(2)
supervisely/nn/benchmark/visualization/vis_metrics/precision.py
CHANGED

@@ -42,6 +42,7 @@ class Precision(MetricVis):
             y="precision",
             # title="Per-class Precision (Sorted by F1)",
             color="precision",
+            range_color=[0, 1],
             color_continuous_scale="Plasma",
         )
         fig.update_traces(hovertemplate="Class: %{x}<br>Precision: %{y:.2f}<extra></extra>")
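Pinning `range_color=[0, 1]` anchors the continuous color scale to the metric's full range instead of letting Plotly rescale it to each chart's own min and max, so bar colors stay comparable across charts. A minimal self-contained Plotly Express sketch with made-up per-class values:

```python
import pandas as pd
import plotly.express as px

df = pd.DataFrame({"category": ["cat", "dog", "bird"], "precision": [0.91, 0.84, 0.62]})
fig = px.bar(
    df,
    x="category",
    y="precision",
    color="precision",
    range_color=[0, 1],  # anchor the scale to [0, 1], not to the data's min/max
    color_continuous_scale="Plasma",
)
fig.update_traces(hovertemplate="Class: %{x}<br>Precision: %{y:.2f}<extra></extra>")
# fig.show()
```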
supervisely/nn/benchmark/visualization/visualizer.py
CHANGED

@@ -361,8 +361,19 @@ class Visualizer:

         gt_project = Project(gt_project_path, OpenMode.READ)
         pred_project = Project(pred_project_path, OpenMode.READ)
+        diff_dataset_id_to_info = {
+            ds.id: ds
+            for ds in self._api.dataset.get_list(self.diff_project_info.id, recursive=True)
+        }
+
+        def _get_full_name(ds_id: int):
+            ds_info = diff_dataset_id_to_info[ds_id]
+            if ds_info.parent_id is None:
+                return ds_info.name
+            return f"{_get_full_name(ds_info.parent_id)}/{ds_info.name}"
+
         diff_dataset_name_to_info = {
-
+            _get_full_name(ds_id): ds_info for ds_id, ds_info in diff_dataset_id_to_info.items()
         }

         matched_id_map = self._get_matched_id_map()  # dt_id -> gt_id
@@ -378,6 +389,13 @@ class Visualizer:
         with self.pbar(
             message="Visualizations: Creating diff_project", total=pred_project.total_items
         ) as progress:
+            logger.debug(
+                "Creating diff project data",
+                extra={
+                    "pred_project": [ds.name for ds in pred_project.datasets],
+                    "gt_project": [ds.name for ds in gt_project.datasets],
+                },
+            )
             for pred_dataset in pred_project.datasets:
                 pred_dataset: Dataset
                 gt_dataset: Dataset = gt_project.datasets.get(pred_dataset.name)
@@ -460,8 +478,19 @@ class Visualizer:
         gt_project_path, pred_project_path = self._benchmark._download_projects(save_images=False)
         gt_project = Project(gt_project_path, OpenMode.READ)
         pred_project = Project(pred_project_path, OpenMode.READ)
+        diff_dataset_id_to_info = {
+            ds.id: ds
+            for ds in self._api.dataset.get_list(self.diff_project_info.id, recursive=True)
+        }
+
+        def _get_full_name(ds_id: int):
+            ds_info = diff_dataset_id_to_info[ds_id]
+            if ds_info.parent_id is None:
+                return ds_info.name
+            return f"{_get_full_name(ds_info.parent_id)}/{ds_info.name}"
+
         diff_dataset_name_to_info = {
-
+            _get_full_name(ds_id): ds_info for ds_id, ds_info in diff_dataset_id_to_info.items()
         }

         for pred_dataset in pred_project.datasets:
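Both hunks add the same helper: diff datasets are fetched recursively as a flat list, then keyed by their slash-joined full path so a nested dataset can be looked up by its qualified name. A sketch of that keying, with `DatasetInfo` as a stand-in for Supervisely's dataset info:

```python
from typing import Dict, NamedTuple, Optional


class DatasetInfo(NamedTuple):  # stand-in for Supervisely's dataset info
    id: int
    name: str
    parent_id: Optional[int]


def full_names(infos: Dict[int, DatasetInfo]) -> Dict[str, DatasetInfo]:
    """Key each dataset by its slash-joined ancestor path, e.g. 'train/batch1'."""

    def name_of(ds_id: int) -> str:
        info = infos[ds_id]
        if info.parent_id is None:
            return info.name
        return f"{name_of(info.parent_id)}/{info.name}"

    return {name_of(ds_id): info for ds_id, info in infos.items()}


datasets = {1: DatasetInfo(1, "train", None), 2: DatasetInfo(2, "batch1", 1)}
assert set(full_names(datasets)) == {"train", "train/batch1"}
```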
supervisely/nn/inference/inference.py
CHANGED

@@ -1498,9 +1498,27 @@ class Inference:
             if src_dataset_id in new_dataset_id:
                 return new_dataset_id[src_dataset_id]
             dataset_info = api.dataset.get_info_by_id(src_dataset_id)
-
-
-
+            if dataset_info.parent_id is None:
+                output_dataset_id = api.dataset.copy(
+                    output_project_id,
+                    src_dataset_id,
+                    dataset_info.name,
+                    change_name_if_conflict=True,
+                ).id
+            else:
+                parent_dataset_id = _get_or_create_new_dataset(
+                    output_project_id, dataset_info.parent_id
+                )
+                output_dataset_info = api.dataset.create(
+                    output_project_id, dataset_info.name, parent_id=parent_dataset_id
+                )
+                api.image.copy_batch_optimized(
+                    dataset_info.id,
+                    images_infos_dict[dataset_info.id],
+                    output_dataset_info.id,
+                    with_annotations=False,
+                )
+                output_dataset_id = output_dataset_info.id
             new_dataset_id[src_dataset_id] = output_dataset_id
             return output_dataset_id

@@ -1775,7 +1793,9 @@ class Inference:
         stop = False

         def image_batch_generator(batch_size):
-            logger.debug(
+            logger.debug(
+                f"image_batch_generator. images_infos={len(images_infos)}, batch_size={batch_size}"
+            )
             batch = []
             while True:
                 for image_info in images_infos:
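The second hunk only expands a debug log, but it exposes the shape of `image_batch_generator`: an endless loop that re-traverses `images_infos`, accumulating fixed-size batches (the surrounding code appears to exit via the `stop` flag defined just above). A sketch of that generator shape, under assumed names:

```python
from typing import Iterator, List, Sequence


def batch_generator(items: Sequence, batch_size: int) -> Iterator[List]:
    """Yield fixed-size batches, cycling over `items` until the consumer stops."""
    batch: List = []
    while True:  # endless: the caller decides when to stop consuming
        for item in items:
            batch.append(item)
            if len(batch) >= batch_size:
                yield batch
                batch = []
```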
{supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/RECORD
CHANGED

@@ -25,7 +25,7 @@ supervisely/api/annotation_api.py,sha256=Eps-Jf10_SQFy7DjghUnyiM6DcVJBsamHDViRAX
 supervisely/api/api.py,sha256=u2T0yOQ-tUnP3iIzFu8zEQb4t_EDrXQSKnaXHCvFDyg,36514
 supervisely/api/app_api.py,sha256=zX3Iy16RuGwtcLZfMs3YfUFc93S9AVGb3W_eINeMjOs,66729
 supervisely/api/dataset_api.py,sha256=7iwAyz3pmzFG2i072gLdXjczfBGbyj-V_rRl7Tx-V30,37944
-supervisely/api/file_api.py,sha256=
+supervisely/api/file_api.py,sha256=y8FkE-vx1382cbhNo_rTZs7SobrkxmYQAe79CpvStO4,54279
 supervisely/api/github_api.py,sha256=NIexNjEer9H5rf5sw2LEZd7C1WR-tK4t6IZzsgeAAwQ,623
 supervisely/api/image_annotation_tool_api.py,sha256=YcUo78jRDBJYvIjrd-Y6FJAasLta54nnxhyaGyanovA,5237
 supervisely/api/image_api.py,sha256=1AgY6Tk-0rJf1miR5kN1YSkQ7EVihgBnPWbP-Ic55NI,135790
@@ -718,28 +718,28 @@ supervisely/nn/artifacts/unet.py,sha256=Gn8ADfwC4F-MABVDPRY7g_ZaAIaaOAEbhhIGII-o
 supervisely/nn/artifacts/yolov5.py,sha256=6KDCyDlLO7AT9of1qHjCaG5mmxCv6C0p-zCk9KJ0PH4,1478
 supervisely/nn/artifacts/yolov8.py,sha256=c3MzbOTYD6RT5N4F9oZ0SWXxyonjJ6ZQfZLYUHPRZg4,1204
 supervisely/nn/benchmark/__init__.py,sha256=RxqbBx7cbzookq2DRvxYIaRofON9uxHeY5h8DqDbZq0,187
-supervisely/nn/benchmark/base_benchmark.py,sha256=
+supervisely/nn/benchmark/base_benchmark.py,sha256=f0TlgPdtl5-hWe38k1q4Jhld48kiDIhhWmGGJoO-FGA,22366
 supervisely/nn/benchmark/cv_tasks.py,sha256=ShoAbuNzfMYj0Se-KOnl_-dJnrmvN6Aukxa0eq28bFw,239
 supervisely/nn/benchmark/instance_segmentation_benchmark.py,sha256=9iiWEH7KDw7ps0mQQdzIrCtCKg4umHekF3ws7jIGjmE,938
 supervisely/nn/benchmark/object_detection_benchmark.py,sha256=s1S-L952etgz-UsDPyg69AgmFfAoJXvFHhITT8zB5iw,956
 supervisely/nn/benchmark/utils.py,sha256=evcoUFPkeEW1-GvAPYp8EoOv3WAsqzaSmCY4lbLLfAQ,607
 supervisely/nn/benchmark/coco_utils/__init__.py,sha256=MKxuzzBWpRCwR8kOb5NXUK8vD-2mroJn48xd6tv9FeI,139
-supervisely/nn/benchmark/coco_utils/sly2coco.py,sha256=
+supervisely/nn/benchmark/coco_utils/sly2coco.py,sha256=iudlcHNynthscH-V5qwCLk6VgIcxYrMEuAfGIjrOjZ0,6867
 supervisely/nn/benchmark/coco_utils/utils.py,sha256=J9kM_Cn4XxfsrSQ8Rx6eb1UsS65-wOybaCkI9rQDeiU,504
 supervisely/nn/benchmark/evaluation/__init__.py,sha256=1NGV_xEGe9lyPdE5gJ8AASKzm2WyZ_jKlh9WVvCQIaY,287
 supervisely/nn/benchmark/evaluation/base_evaluator.py,sha256=Ac1EsvRrMH-Fck1aVS9T2Tx1m9PfialRA3z8XJs5e8U,1039
-supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py,sha256=
-supervisely/nn/benchmark/evaluation/object_detection_evaluator.py,sha256=
+supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py,sha256=oskpLBSwo_u224m_fc-oVJD0GGWgsyR9HrBVEEQ-FAE,3101
+supervisely/nn/benchmark/evaluation/object_detection_evaluator.py,sha256=5XrTnNpgdZgJ-LgXdUd74OvZLTQEenltxTSO12bTwqg,2943
 supervisely/nn/benchmark/evaluation/coco/__init__.py,sha256=l6dFxp9aenywosQzQkIaDEI1p-DDQ63OgJJXxSVB4Mk,172
 supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py,sha256=tgUAXngl0QcGpSdGvZRVo6f_0YP_PF4Leu2fpx5a_Us,10702
 supervisely/nn/benchmark/evaluation/coco/metric_provider.py,sha256=j4YMk20t3lsX3QnsSIRjEYx8EayHw77I4KdXxKfgxeI,17513
 supervisely/nn/benchmark/evaluation/coco/metrics.py,sha256=oyictdJ7rRDUkaVvHoxntywW5zZweS8pIJ1bN6JgXtE,2420
 supervisely/nn/benchmark/visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/vis_click_data.py,sha256=4QdBZqJmmPYdcB7x565zOtXhDFRyXIB4tpu0V-_otoc,3724
-supervisely/nn/benchmark/visualization/vis_metric_base.py,sha256=
+supervisely/nn/benchmark/visualization/vis_metric_base.py,sha256=NJBduyKE0UPihPtM2BR2eOdDwkUWZ3lMBpZGy9UFOZ0,13859
 supervisely/nn/benchmark/visualization/vis_templates.py,sha256=tDPQcuByvnDdfGdDaT-KhemnKCtieunp-MgnGAPsbrQ,9905
 supervisely/nn/benchmark/visualization/vis_widgets.py,sha256=CsT7DSfxH4g4zHsmm_7RCJf3YR6zXiADuYhUIIGdn7w,4073
-supervisely/nn/benchmark/visualization/visualizer.py,sha256=
+supervisely/nn/benchmark/visualization/visualizer.py,sha256=BLu31ETO202AgpT1gjiAry-m_hk3ExzyFUsQtOHaeqU,31729
 supervisely/nn/benchmark/visualization/inference_speed/__init__.py,sha256=6Nahwt9R61_Jc1eWupXa70CgyRQ7tbUeiDWR26017rY,554
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py,sha256=73gbXs1uTfxxWH-UCJdR72m-48jMD5qVyMyolf5jNoc,6140
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py,sha256=ivUVriKyhx9ZtwVSqrAkUqq1SJGYYxNLwLQR1UgE4aM,900
@@ -747,18 +747,18 @@ supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py,sha
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py,sha256=bVpNS3YBP0TGsqE_XQBuFMJI5ybDM0RZpEzFyT7cbkA,2157
 supervisely/nn/benchmark/visualization/text_templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py,sha256=XGeBrbP-ROyKYbqYZzA281_IG45Ygu9NKyqG2I3o5TU,1124
-supervisely/nn/benchmark/visualization/text_templates/instance_segmentation_text.py,sha256=
-supervisely/nn/benchmark/visualization/text_templates/object_detection_text.py,sha256=
+supervisely/nn/benchmark/visualization/text_templates/instance_segmentation_text.py,sha256=ud_XqN3q8nbpAvk3JvW_8LNcmnkq7B-MuLgILgSVeJA,25116
+supervisely/nn/benchmark/visualization/text_templates/object_detection_text.py,sha256=jKHeKZGwEP1rnKpKHpf9x5iP9L8JaFMkvnFcayxvgeI,24508
 supervisely/nn/benchmark/visualization/vis_metrics/__init__.py,sha256=Qrd9NMgFUQ1nbEy4NEu59RXR4OmVaBdK_iLOGpwVRCA,2249
 supervisely/nn/benchmark/visualization/vis_metrics/classwise_error_analysis.py,sha256=8S_XYSA_qyVWAEnvebdjuw9ktJScDHgTVvZRfh-bvpc,1841
 supervisely/nn/benchmark/visualization/vis_metrics/confidence_distribution.py,sha256=m-z0-jPn3dd_X-w49Zjkb4qEEP6Rw6fVpSjgSuJoeRw,3794
 supervisely/nn/benchmark/visualization/vis_metrics/confidence_score.py,sha256=F38WSVF2a2ePV7DWfYdxDlSjM68NvjPMccfaTQWg0uI,3356
 supervisely/nn/benchmark/visualization/vis_metrics/confusion_matrix.py,sha256=SFDRpoJX-IJYv-SoCXOyhXZTpVUm6G1IYCe15Nei9Uc,3362
-supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py,sha256
+supervisely/nn/benchmark/visualization/vis_metrics/explorer_grid.py,sha256=-TmGMWr6bHZd80TBGk3S_qdDQ-xlAL-zczAYw8h9vtU,5738
 supervisely/nn/benchmark/visualization/vis_metrics/f1_score_at_different_iou.py,sha256=QbHUT0-beBs0z8anbma1MS6iNEG89CmL6iSj8-ejnlc,3158
 supervisely/nn/benchmark/visualization/vis_metrics/frequently_confused.py,sha256=2lhwqaXdIEXhFSaqBYcJjRLp_OgBTEYxajYib0v4qRQ,3992
 supervisely/nn/benchmark/visualization/vis_metrics/iou_distribution.py,sha256=Kir2F3piCBf-xKhoQzVXbo38zyrLLhOr3bYay90H1-g,3015
-supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py,sha256=
+supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py,sha256=38_dJAzAfJNsfFOg_bqE3B-btrk1K_sQOpwk26Y64Nk,6134
 supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py,sha256=rsm_hdE0pYCHY-5v0pjDIid71y2tPbzYbmH2Qw-RS-4,3983
 supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py,sha256=lSb2-jfplyERIUCi8_6P9aq6C77JGOKOJK20J824sEE,5623
 supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py,sha256=YHfueea2EkUgNGP4FCyKyCaCtCwaYeYNJ3WwfF-Hzi4,3553
@@ -766,14 +766,14 @@ supervisely/nn/benchmark/visualization/vis_metrics/overview.py,sha256=_TwrBnkEfw
 supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py,sha256=mm8IVM90EoIC_9GsiM-Jyhh6jPqQcHMo788VAvRAzMY,1877
 supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py,sha256=4-AwEQk1ywuW4zXO_EXo7_aFMjenwhnLlGX2PWqiu0k,3574
 supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py,sha256=9Uuibo38HVGPChPbCW8i3cMYdb6-NFlys1TBisp5zOU,1442
-supervisely/nn/benchmark/visualization/vis_metrics/precision.py,sha256=
-supervisely/nn/benchmark/visualization/vis_metrics/recall.py,sha256=
+supervisely/nn/benchmark/visualization/vis_metrics/precision.py,sha256=_ATnCkson-tSOv3xp7LI7BmwzIax75zuKs7VoeBZ_ds,2156
+supervisely/nn/benchmark/visualization/vis_metrics/recall.py,sha256=trRMw8ziWMaDp5cmJuwFaJ6aO_rfsTbCYb9LMdr9D_Q,2078
 supervisely/nn/benchmark/visualization/vis_metrics/recall_vs_precision.py,sha256=RuN3tQA3Zt5MmCha8T0WgWDIvzURjsqfL4ap_LYqN-Y,1859
 supervisely/nn/benchmark/visualization/vis_metrics/reliability_diagram.py,sha256=Vxta2s0RTTcV0GCcMiF8CykCtZYryLTwGjW9vVUrK3I,3107
 supervisely/nn/benchmark/visualization/vis_metrics/what_is.py,sha256=MDnYR-o7Mj-YE1Jwu9EcLUEPcu6rLknRx7LvV4nnUBo,842
 supervisely/nn/inference/__init__.py,sha256=mtEci4Puu-fRXDnGn8RP47o97rv3VTE0hjbYO34Zwqg,1622
 supervisely/nn/inference/cache.py,sha256=vjFYIkoV-txzAl_C_WKvS5odccBU8GHFY8iTxxnSqLU,25619
-supervisely/nn/inference/inference.py,sha256=
+supervisely/nn/inference/inference.py,sha256=7N6fMqOd18VQoTCaP4slc_cuL5ovc43FIpWomv6PtEc,114524
 supervisely/nn/inference/session.py,sha256=jmkkxbe2kH-lEgUU6Afh62jP68dxfhF5v6OGDfLU62E,35757
 supervisely/nn/inference/video_inference.py,sha256=8Bshjr6rDyLay5Za8IB8Dr6FURMO2R_v7aELasO8pR4,5746
 supervisely/nn/inference/gui/__init__.py,sha256=e3RKi93bI1r_0Dkvs_gaR1p_jkzkBMNjrcx-RVlm93k,88
@@ -953,9 +953,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
+supervisely-6.73.202.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.202.dist-info/METADATA,sha256=MUPM7KGiNv7G7N0NuTuSD2xmPsAkklij4YUNiS8hknM,33077
+supervisely-6.73.202.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+supervisely-6.73.202.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.202.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.202.dist-info/RECORD,,
{supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/LICENSE
File without changes

{supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/WHEEL
File without changes

{supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/entry_points.txt
File without changes

{supervisely-6.73.200.dist-info → supervisely-6.73.202.dist-info}/top_level.txt
File without changes