supervisely-6.73.205-py3-none-any.whl → supervisely-6.73.207-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/convert/__init__.py +1 -2
- supervisely/convert/base_converter.py +1 -0
- supervisely/convert/converter.py +7 -13
- supervisely/convert/image/high_color/__init__.py +0 -0
- supervisely/convert/image/high_color/high_color_depth.py +141 -0
- supervisely/convert/image/high_color/high_color_helper.py +57 -0
- supervisely/convert/image/image_converter.py +19 -7
- supervisely/nn/benchmark/base_benchmark.py +9 -0
- supervisely/nn/benchmark/evaluation/base_evaluator.py +29 -1
- supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py +47 -11
- supervisely/nn/benchmark/evaluation/coco/evaluation_params.yaml +2 -0
- supervisely/nn/benchmark/evaluation/coco/metric_provider.py +59 -0
- supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py +18 -2
- supervisely/nn/benchmark/evaluation/object_detection_evaluator.py +18 -2
- supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py +1 -1
- supervisely/nn/benchmark/visualization/vis_metric_base.py +1 -0
- supervisely/nn/benchmark/visualization/vis_metrics/overview.py +32 -3
- supervisely/nn/benchmark/visualization/vis_templates.py +8 -2
- supervisely/nn/benchmark/visualization/vis_widgets.py +1 -0
- supervisely/nn/benchmark/visualization/visualizer.py +11 -0
- supervisely/team_files/__init__.py +5 -1
- supervisely/team_files/team_files_path.py +2 -0
- {supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/METADATA +1 -1
- {supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/RECORD +28 -24
- {supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/LICENSE +0 -0
- {supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/WHEEL +0 -0
- {supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/top_level.txt +0 -0
supervisely/convert/__init__.py
CHANGED

@@ -29,7 +29,7 @@ from supervisely.convert.image.yolo.yolo_converter import YOLOConverter
 from supervisely.convert.image.multi_view.multi_view import MultiViewImageConverter
 from supervisely.convert.image.label_me.label_me_converter import LabelmeConverter
 from supervisely.convert.image.label_studio.label_studio_converter import LabelStudioConverter
-
+from supervisely.convert.image.high_color.high_color_depth import HighColorDepthImageConverter
 
 
 # Pointcloud
@@ -52,4 +52,3 @@ from supervisely.convert.video.sly.sly_video_converter import SLYVideoConverter
 # Volume
 from supervisely.convert.volume.sly.sly_volume_converter import SLYVolumeConverter
 from supervisely.convert.volume.dicom.dicom_converter import DICOMConverter
-
supervisely/convert/converter.py
CHANGED

@@ -3,15 +3,13 @@ from pathlib import Path
 
 from tqdm import tqdm
 
-try:
-    from typing import Literal
-except ImportError:
-    from typing_extensions import Literal
+from typing import Literal, Optional
 
 from supervisely._utils import is_production
 from supervisely.api.api import Api
 from supervisely.app import get_data_dir
 from supervisely.convert.image.csv.csv_converter import CSVConverter
+from supervisely.convert.image.high_color.high_color_depth import HighColorDepthImageConverter
 from supervisely.convert.image.image_converter import ImageConverter
 from supervisely.convert.pointcloud.pointcloud_converter import PointcloudConverter
 from supervisely.convert.pointcloud_episodes.pointcloud_episodes_converter import (
@@ -33,21 +31,17 @@ from supervisely.io.fs import (
 from supervisely.project.project_type import ProjectType
 from supervisely.sly_logger import logger
 from supervisely.task.progress import Progress
+from supervisely.project.project_settings import LabelingInterface
 
 
 class ImportManager:
+
     def __init__(
         self,
         input_data: str,
         project_type: ProjectType,
-        team_id: int = None,
-        labeling_interface: Literal[
-            "default",
-            "multi_view",
-            "multispectral",
-            "images_with_16_color",
-            "medical_imaging_single",
-        ] = "default",
+        team_id: Optional[int] = None,
+        labeling_interface: LabelingInterface = LabelingInterface.DEFAULT,
        upload_as_links: bool = False,
     ):
         self._api = Api.from_env()
@@ -69,7 +63,7 @@ class ImportManager:
 
         self._modality = project_type
         self._converter = self.get_converter()
-        if isinstance(self._converter, CSVConverter):
+        if isinstance(self._converter, (HighColorDepthImageConverter, CSVConverter)):
             self._converter.team_id = self._team_id
 
     @property
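Note: the new signature replaces the old string-literal `labeling_interface` values with the `LabelingInterface` enum, and `team_id` is now explicitly `Optional`. A minimal usage sketch of the updated constructor, assuming a Supervisely task environment (the input path and team id below are hypothetical placeholders):

from supervisely.convert.converter import ImportManager
from supervisely.project.project_settings import LabelingInterface
from supervisely.project.project_type import ProjectType

# "./my_images" and team_id=42 are illustrative only
manager = ImportManager(
    "./my_images",
    ProjectType.IMAGES,
    team_id=42,
    labeling_interface=LabelingInterface.DEFAULT,  # previously the string "default"
)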
supervisely/convert/image/high_color/__init__.py
ADDED

File without changes
supervisely/convert/image/high_color/high_color_depth.py
ADDED

@@ -0,0 +1,141 @@
+import os
+from collections import defaultdict
+from typing import Dict, Generator, Union
+
+import cv2
+import numpy as np
+import tqdm
+
+from supervisely import ProjectMeta, generate_free_name, is_development, logger
+from supervisely._utils import batched
+from supervisely.annotation.annotation import Annotation
+from supervisely.api.api import Api
+from supervisely.convert.base_converter import AvailableImageConverters
+from supervisely.convert.image.high_color import high_color_helper as helpers
+from supervisely.convert.image.image_converter import ImageConverter
+from supervisely.io.env import task_id as get_task_id
+from supervisely.io.env import team_id as get_team_id
+from supervisely.io.fs import list_files_recursively
+from supervisely.io.json import load_json_file
+from supervisely.project.project_settings import LabelingInterface
+from supervisely.team_files import RECOMMENDED_IMPORT_BACKUP_PATH
+
+
+class HighColorDepthImageConverter(ImageConverter):
+    # allowed_exts = [".png", ".tiff", ".tif", ".bmp", ".exr", ".hdr"]
+    allowed_exts = [".exr", ".hdr"]
+
+    class Item(ImageConverter.Item):
+
+        def __init__(self, *args, **kwargs):
+            super().__init__(*args, **kwargs)
+            self._original_path = None
+
+        @property
+        def original_path(self) -> Union[str, None]:
+            return self._original_path
+
+        @original_path.setter
+        def original_path(self, original_path: str) -> None:
+            self._original_path = original_path
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._team_id = None
+
+    def __str__(self):
+        return AvailableImageConverters.HIGH_COLOR_DEPTH
+
+    def validate_labeling_interface(self) -> bool:
+        """Only multi_view labeling interface can be used to group images on single screen."""
+        return self._labeling_interface in [
+            LabelingInterface.DEFAULT,
+            LabelingInterface.IMAGES_WITH_16_COLOR,
+        ]
+
+    def validate_format(self) -> bool:
+        logger.debug(f"Validating format: {self.__str__()}")
+
+        for image_path in self._find_high_color_depth_image_generator(self._input_data):
+            item = self.Item(image_path)
+            self._items.append(item)
+
+        if self.items_count == 0:
+            logger.debug(f"Not found any images with high color depth in '{self._input_data}'")
+            return False
+
+        return True
+
+    def _find_high_color_depth_image_generator(self, input_data: str) -> Generator[str, None, None]:
+        for image_path in list_files_recursively(input_data, valid_extensions=self.allowed_exts):
+            if self._is_high_color_depth_image(image_path):
+                yield image_path
+
+    def _is_high_color_depth_image(self, image_path: str) -> bool:
+        image = helpers.read_high_color_images(image_path)
+        if image is None:
+            return False
+        return image.dtype in self._supported_depths
+
+    @property
+    def _supported_depths(self):
+        return [np.uint16, np.int16, np.float16, np.float32, np.float64, np.uint32]
+
+    def to_supervisely(self, *args, **kwargs):
+        return None
+
+    def upload_dataset(
+        self,
+        api: Api,
+        dataset_id: int,
+        batch_size: int = 50,
+        log_progress=True,
+    ) -> None:
+        """Upload converted data to Supervisely"""
+        team_id = get_team_id()
+        backup_dir = os.path.join(RECOMMENDED_IMPORT_BACKUP_PATH, str(get_task_id()))
+
+        # progress, progress_cb = None, None
+        # if log_progress:
+        #     progress, progress_cb = self.get_progress(
+        #         len(self.get_items()),
+        #         "Backing up original files and converting images to nrrd...",
+        #     )
+
+        # Back up original files to avoid data loss and convert images to nrrd format
+        with tqdm.tqdm(
+            total=len(self.get_items()),
+            desc="Backing up original files and converting images to nrrd...",
+        ) as pbar:
+            for batch_items in batched(self.get_items(), batch_size=batch_size):
+
+                local_paths, remote_paths = [], []
+
+                for item in batch_items:
+                    local_paths.append(item.path)
+
+                    remote_path = os.path.join(backup_dir, item.name)
+                    remote_paths.append(remote_path)
+
+                    item.original_path = remote_path
+                    image = helpers.read_high_color_images(item.path)
+
+                    orig_item_name = item.name
+                    nrrd_path = item.path + ".nrrd"
+                    nrrd_path = helpers.save_nrrd(image, nrrd_path)
+                    item.path = nrrd_path
+
+                    item_meta = {}
+                    # Add original file path to image meta
+                    item_meta["original_file_path"] = os.path.join(backup_dir, orig_item_name)
+                    if item.meta:
+                        item_meta.update(item.meta)
+                    item.set_meta_data(item_meta)
+
+                api.file.upload_bulk(team_id, local_paths, remote_paths)
+                pbar.update(len(batch_items))
+
+        if log_progress and is_development():
+            pbar.close()
+
+        super().upload_dataset(api, dataset_id, batch_size, log_progress, self.get_items())
supervisely/convert/image/high_color/high_color_helper.py
ADDED

@@ -0,0 +1,57 @@
+import os
+
+import cv2
+import nrrd
+import numpy as np
+
+from supervisely._utils import generate_free_name
+from supervisely.imaging.image import fliplr
+from supervisely.io.fs import get_file_ext
+
+
+def read_high_color_images(image_path: str) -> np.ndarray:
+    """Read high color images"""
+
+    ext = get_file_ext(image_path).lower()
+    if ext in [".exr", ".hdr"]:
+        os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
+
+        image = cv2.imread(image_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
+        if image.ndim == 3:
+            # Convert the image to grayscale if it has 3 dimensions
+            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+        # Normalize the image to 0-255
+        image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX)
+        # Convert to uint16
+        image = image.astype(np.uint16)
+
+    elif ext in [".tiff", ".tif"]:
+        import tifffile
+
+        image = tifffile.imread(image_path)
+    else:
+        image = cv2.imread(image_path)
+
+    return image
+
+
+def save_nrrd(image: np.ndarray, save_path: str) -> str:
+    """Save numpy image as nrrd file"""
+
+    # Ensure the image is 2D
+    if image.ndim != 2:
+        raise ValueError("The image must be 2D to save as NRRD.")
+
+    image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
+    image = fliplr(image)
+
+    used_names = {os.path.basename(save_path)}
+
+    name = os.path.basename(save_path)
+    while os.path.exists(save_path):
+        name = generate_free_name(used_names, name, True, True)
+        save_path = os.path.join(os.path.dirname(save_path), name)
+
+    nrrd.write(save_path, image)
+
+    return save_path
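In short, `read_high_color_images` loads EXR/HDR data through OpenCV's OpenEXR backend, collapses 3-channel input to grayscale, min-max normalizes into 0–255, and casts to uint16; `save_nrrd` then rotates/flips the 2D array and writes it under a collision-free name. A round-trip sketch using only the two helpers above (the input file name is hypothetical):

from supervisely.convert.image.high_color import high_color_helper as helpers

image = helpers.read_high_color_images("depth_map.exr")  # 2D uint16 for EXR/HDR input
if image is not None and image.ndim == 2:
    # Appends ".nrrd" the same way the converter does; save_nrrd picks a free
    # name if the target file already exists and returns the final path
    nrrd_path = helpers.save_nrrd(image, "depth_map.exr" + ".nrrd")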
supervisely/convert/image/image_converter.py
CHANGED

@@ -1,5 +1,5 @@
-import os
 import mimetypes
+import os
 from typing import Dict, List, Optional, Tuple, Union
 
 import cv2
@@ -16,10 +16,10 @@ from supervisely import (
     is_development,
     logger,
 )
-from supervisely.convert.base_converter import BaseConverter
 from supervisely.api.api import ApiContext
+from supervisely.convert.base_converter import BaseConverter
 from supervisely.imaging.image import SUPPORTED_IMG_EXTS, is_valid_ext
-from supervisely.io.fs import get_file_ext, get_file_name, list_files
+from supervisely.io.fs import dirs_filter, get_file_ext, get_file_name, list_files
 from supervisely.io.json import load_json_file
 from supervisely.project.project_settings import LabelingInterface
 
@@ -31,12 +31,13 @@ class ImageConverter(BaseConverter):
     modality = "images"
 
     class Item(BaseConverter.BaseItem):
+
        def __init__(
            self,
            item_path: str,
            ann_data: Union[str, dict] = None,
            meta_data: Union[str, dict] = None,
-           shape: Union[Tuple, List] = None,
+           shape: Optional[Union[Tuple, List]] = None,
            custom_data: Optional[dict] = None,
        ):
            self._path: str = item_path
@@ -51,7 +52,7 @@ class ImageConverter(BaseConverter):
         def meta(self) -> Union[str, dict]:
             return self._meta_data
 
-        def set_shape(self, shape: Tuple[int, int] = None) -> None:
+        def set_shape(self, shape: Optional[Tuple[int, int]] = None) -> None:
             try:
                 if shape is not None:
                     self._shape = shape
@@ -145,7 +146,14 @@ class ImageConverter(BaseConverter):
             )
             item_names.append(name)
             item_paths.append(item.path)
-
+
+            if isinstance(item.meta, str):  # path to file
+                item_metas.append(load_json_file(item.meta))
+            elif isinstance(item.meta, dict):
+                item_metas.append(item.meta)
+            else:
+                item_metas.append({})
+
             if ann is not None:
                 anns.append(ann)
@@ -199,7 +207,11 @@ class ImageConverter(BaseConverter):
 
     def _is_meta_dir(dirpath: str) -> bool:
         if os.path.basename(dirpath).lower() == "meta":
-            jsons = list_files(dirpath, valid_extensions=[".json"])
+            jsons = list_files(
+                dirpath,
+                valid_extensions=[".json"],
+                ignore_valid_extensions_case=True,
+            )
             return len(jsons) > 0
         return False
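The new meta branch means an item's meta may arrive either as an in-memory dict or as a path to a JSON file. A standalone sketch of the same logic (`resolve_item_meta` is a hypothetical helper name for illustration, not part of the SDK):

from supervisely.io.json import load_json_file

def resolve_item_meta(meta):
    # Mirrors the new branch in ImageConverter:
    # str -> JSON file on disk, dict -> use as-is, otherwise empty meta
    if isinstance(meta, str):
        return load_json_file(meta)
    if isinstance(meta, dict):
        return meta
    return {}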
supervisely/nn/benchmark/base_benchmark.py
CHANGED

@@ -1,4 +1,5 @@
 import os
+import yaml
 from typing import Callable, List, Optional, Tuple, Union
 
 import numpy as np
@@ -31,6 +32,7 @@ class BaseBenchmark:
         progress: Optional[SlyTqdm] = None,
         progress_secondary: Optional[SlyTqdm] = None,
         classes_whitelist: Optional[List[str]] = None,
+        evaluation_params: Optional[dict] = None,
     ):
         self.api = api
         self.session: SessionJSON = None
@@ -51,6 +53,8 @@ class BaseBenchmark:
         self.vis_texts = None
         self.inference_speed_text = None
         self.train_info = None
+        self.evaluation_params = evaluation_params
+        self._validate_evaluation_params()
 
     def _get_evaluator_class(self) -> type:
         raise NotImplementedError()
@@ -152,6 +156,7 @@ class BaseBenchmark:
             progress=self.pbar,
             items_count=self.dt_project_info.items_count,
             classes_whitelist=self.classes_whitelist,
+            evaluation_params=self.evaluation_params,
         )
         self.evaluator.evaluate()
@@ -552,3 +557,7 @@ class BaseBenchmark:
             chagned = True
         if chagned:
             self.api.project.update_meta(pred_project_id, pred_meta.to_json())
+
+    def _validate_evaluation_params(self):
+        if self.evaluation_params:
+            self._get_evaluator_class().validate_evaluation_params(self.evaluation_params)
supervisely/nn/benchmark/evaluation/base_evaluator.py
CHANGED

@@ -1,12 +1,15 @@
 import os
 import pickle
-from typing import List, Optional
+from typing import List, Optional, Union
+
+import yaml
 
 from supervisely.app.widgets import SlyTqdm
 from supervisely.task.progress import tqdm_sly
 
 
 class BaseEvaluator:
+    EVALUATION_PARAMS_YAML_PATH: str = None
 
     def __init__(
         self,
@@ -16,6 +19,7 @@ class BaseEvaluator:
         progress: Optional[SlyTqdm] = None,
         items_count: Optional[int] = None,  # TODO: is it needed?
         classes_whitelist: Optional[List[str]] = None,
+        evaluation_params: Optional[dict] = None,
     ):
         self.gt_project_path = gt_project_path
         self.dt_project_path = dt_project_path
@@ -25,12 +29,36 @@ class BaseEvaluator:
         os.makedirs(result_dir, exist_ok=True)
         self.classes_whitelist = classes_whitelist
 
+        if evaluation_params is None:
+            evaluation_params = self._get_default_evaluation_params()
+        self.evaluation_params = evaluation_params
+        if self.evaluation_params:
+            self.validate_evaluation_params(self.evaluation_params)
+
     def evaluate(self):
         raise NotImplementedError()
 
     def get_result_dir(self) -> str:
         return self.result_dir
 
+    @classmethod
+    def load_yaml_evaluation_params(cls) -> Union[str, None]:
+        if cls.EVALUATION_PARAMS_YAML_PATH is None:
+            return None
+        with open(cls.EVALUATION_PARAMS_YAML_PATH, "r") as f:
+            return f.read()
+
+    @classmethod
+    def validate_evaluation_params(cls, evaluation_params: dict) -> None:
+        pass
+
+    @classmethod
+    def _get_default_evaluation_params(cls) -> dict:
+        if cls.EVALUATION_PARAMS_YAML_PATH is None:
+            return {}
+        else:
+            return yaml.safe_load(cls.load_yaml_evaluation_params())
+
     def _dump_pickle(self, data, file_path):
         with open(file_path, "wb") as f:
             pickle.dump(data, f)
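The resulting flow: if the caller passes no `evaluation_params`, `_get_default_evaluation_params` parses the class-level YAML with `yaml.safe_load`; otherwise the supplied dict is validated eagerly. A hedged sketch of a subclass wired into this mechanism (the YAML path is hypothetical; the COCO evaluators point theirs at coco/evaluation_params.yaml):

from supervisely.nn.benchmark.evaluation.base_evaluator import BaseEvaluator

class MyEvaluator(BaseEvaluator):
    # Hypothetical params file shipped next to the evaluator
    EVALUATION_PARAMS_YAML_PATH = "/path/to/my_evaluation_params.yaml"

    @classmethod
    def validate_evaluation_params(cls, evaluation_params: dict) -> None:
        # Reject anything without the one key this evaluator understands
        assert "iou_threshold" in evaluation_params, "iou_threshold is required"

    def evaluate(self):
        # self.evaluation_params is now always populated:
        # either the caller's dict or the YAML defaults
        ...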
supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py
CHANGED

@@ -1,10 +1,39 @@
 from collections import defaultdict
-from typing import Callable, Literal, Optional
+from typing import Callable, List, Literal, Optional
 
 import numpy as np
 
-
-def calculate_metrics(cocoGt, cocoDt, iouType: Literal["bbox", "segm"], progress_cb: Optional[Callable] = None):
+# pylint: disable=import-error
+from pycocotools.cocoeval import COCOeval
+
+
+def set_cocoeval_params(
+    cocoeval: COCOeval,
+    parameters: dict,
+):
+    if parameters is None:
+        return
+    param_names = (
+        "iouThrs",
+        "recThrs",
+        "maxDets",
+        "areaRng",
+        "areaRngLbl",
+        # "kpt_oks_sigmas"  # For keypoints
+    )
+    for param_name in param_names:
+        cocoeval.params.__setattr__(
+            param_name, parameters.get(param_name, cocoeval.params.__getattribute__(param_name))
+        )
+
+
+def calculate_metrics(
+    cocoGt,
+    cocoDt,
+    iouType: Literal["bbox", "segm"],
+    progress_cb: Optional[Callable] = None,
+    evaluation_params: Optional[dict] = None,
+):
     """
     Calculate COCO metrics.
 
@@ -19,35 +48,42 @@ def calculate_metrics(cocoGt, cocoDt, iouType: Literal["bbox", "segm"], progress
     :return: Results of the evaluation
     :rtype: dict
     """
-    from pycocotools.cocoeval import COCOeval  # pylint: disable=import-error
 
-    progress_cb(1) if progress_cb is not None else None
     cocoEval = COCOeval(cocoGt, cocoDt, iouType=iouType)
-    progress_cb(1) if progress_cb is not None else None
     cocoEval.evaluate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval.accumulate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval.summarize()
-    progress_cb(1) if progress_cb is not None else None
 
     # For classification metrics
     cocoEval_cls = COCOeval(cocoGt, cocoDt, iouType=iouType)
-    progress_cb(1) if progress_cb is not None else None
     cocoEval_cls.params.useCats = 0
     cocoEval_cls.evaluate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval_cls.accumulate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval_cls.summarize()
-
+
+    iou_t = 0
+    is_custom_iou_threshold = (
+        evaluation_params is not None and evaluation_params.get("iou_threshold") and evaluation_params["iou_threshold"] != 0.5
+    )
+    if is_custom_iou_threshold:
+        iou_t = np.where(cocoEval.params.iouThrs == evaluation_params["iou_threshold"])[0][0]
 
     eval_img_dict = get_eval_img_dict(cocoEval)
     eval_img_dict_cls = get_eval_img_dict(cocoEval_cls)
-    matches = get_matches(eval_img_dict, eval_img_dict_cls, cocoEval_cls, iou_t=0)
+    matches = get_matches(eval_img_dict, eval_img_dict_cls, cocoEval_cls, iou_t=iou_t)
 
-    params = {"iouThrs": cocoEval.params.iouThrs, "recThrs": cocoEval.params.recThrs}
+    params = {
+        "iouThrs": cocoEval.params.iouThrs,
+        "recThrs": cocoEval.params.recThrs,
+        "evaluation_params": evaluation_params or {},
+    }
     coco_metrics = {"mAP": cocoEval.stats[0], "precision": cocoEval.eval["precision"]}
+    coco_metrics["AP50"] = cocoEval.stats[1]
+    coco_metrics["AP75"] = cocoEval.stats[2]
     eval_data = {
         "matches": matches,
         "coco_metrics": coco_metrics,
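The `np.where` lookup only succeeds when the requested threshold is exactly one of the values in `cocoEval.params.iouThrs` (the COCO grid 0.5:0.05:0.95), which is why the evaluators restrict `iou_threshold` to that list. A worked sketch of the index computation:

import numpy as np

# COCOeval's default IoU threshold grid
iouThrs = np.array([0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
iou_threshold = 0.75

# Same lookup calculate_metrics performs; index 5 selects the 0.75 slice
iou_t = np.where(iouThrs == iou_threshold)[0][0]
assert iou_t == 5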
supervisely/nn/benchmark/evaluation/coco/metric_provider.py
CHANGED

@@ -89,6 +89,8 @@ class MetricProvider:
         self.coco_precision = coco_metrics["precision"]
         self.iouThrs = params["iouThrs"]
         self.recThrs = params["recThrs"]
+        self.iou_threshold = params["evaluation_params"]["iou_threshold"]
+        self.iou_threshold_idx = np.searchsorted(self.iouThrs, self.iou_threshold)
 
     def calculate(self):
         self.m_full = _MetricProvider(
@@ -134,7 +136,64 @@ class MetricProvider:
         self._scores_tp_and_fp = self.m_full.scores_tp_and_fp()
         self._maximum_calibration_error = self.m_full.maximum_calibration_error()
         self._expected_calibration_error = self.m_full.expected_calibration_error()
+
+    def json_metrics(self):
+        base = self.base_metrics()
+        iou_name = int(self.iou_threshold * 100)
+        ap_by_class = self.AP_per_class().tolist()
+        ap_by_class = dict(zip(self.cat_names, ap_by_class))
+        ap_custom_by_class = self.AP_custom_per_class().tolist()
+        ap_custom_by_class = dict(zip(self.cat_names, ap_custom_by_class))
+        return {
+            "mAP": base["mAP"],
+            "AP50": self.coco_metrics["AP50"],
+            "AP75": self.coco_metrics["AP75"],
+            f"AP{iou_name}": self.AP_custom(),
+            "f1": base["f1"],
+            "precision": base["precision"],
+            "recall": base["recall"],
+            "iou": base["iou"],
+            "classification_accuracy": base["classification_accuracy"],
+            "calibration_score": base["calibration_score"],
+            "f1_optimal_conf": self.f1_optimal_conf,
+            "expected_calibration_error": self.expected_calibration_error(),
+            "maximum_calibration_error": self.maximum_calibration_error(),
+            "AP_by_class": ap_by_class,
+            f"AP{iou_name}_by_class": ap_custom_by_class,
+        }
+
+    def metric_table(self):
+        table = self.json_metrics()
+        iou_name = int(self.iou_threshold * 100)
+        return {
+            "mAP": table["mAP"],
+            "AP50": table["AP50"],
+            "AP75": table["AP75"],
+            f"AP{iou_name}": table[f"AP{iou_name}"],
+            "f1": table["f1"],
+            "precision": table["precision"],
+            "recall": table["recall"],
+            "Avg. IoU": table["iou"],
+            "Classification Acc.": table["classification_accuracy"],
+            "Calibration Score": table["calibration_score"],
+            "optimal confidence threshold": table["f1_optimal_conf"],
+        }
 
+    def AP_per_class(self):
+        s = self.coco_precision[:, :, :, 0, 2]
+        s[s == -1] = np.nan
+        ap = np.nanmean(s, axis=(0, 1))
+        return ap
+
+    def AP_custom_per_class(self):
+        s = self.coco_precision[self.iou_threshold_idx, :, :, 0, 2]
+        s[s == -1] = np.nan
+        ap = np.nanmean(s, axis=0)
+        return ap
+
+    def AP_custom(self):
+        return np.nanmean(self.AP_custom_per_class())
+
     def base_metrics(self):
         base = self._base_metrics
         calibration_score = 1 - self._expected_calibration_error
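For orientation, pycocotools stores `eval["precision"]` as a 5-D array indexed `[iou_threshold, recall, class, area_range, max_dets]`, so the `[..., 0, 2]` slices above select the "all" area range and the largest maxDets setting (100 by default). `AP_per_class` averages over both the IoU and recall axes, while `AP_custom_per_class` first fixes a single IoU slice. A toy sketch of the same reductions:

import numpy as np

# [T=10 IoU thresholds, R=101 recall points, K=3 classes, A=4 area ranges, M=3 maxDets]
precision = np.random.rand(10, 101, 3, 4, 3)

ap_per_class = np.nanmean(precision[:, :, :, 0, 2], axis=(0, 1))  # shape (3,): per-class mAP
ap75_per_class = np.nanmean(precision[5, :, :, 0, 2], axis=0)     # per-class AP at IoU=0.75 only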
supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py
CHANGED

@@ -4,9 +4,12 @@ from supervisely.io.json import dump_json_file
 from supervisely.nn.benchmark.coco_utils import read_coco_datasets, sly2coco
 from supervisely.nn.benchmark.evaluation import BaseEvaluator
 from supervisely.nn.benchmark.evaluation.coco import calculate_metrics
+from pathlib import Path
 
 
 class InstanceSegmentationEvaluator(BaseEvaluator):
+    EVALUATION_PARAMS_YAML_PATH = f"{Path(__file__).parent}/coco/evaluation_params.yaml"
+
     def evaluate(self):
         try:
             self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
@@ -19,12 +22,25 @@ class InstanceSegmentationEvaluator(BaseEvaluator):
 
         self._dump_datasets()
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
-        with self.pbar(message="Evaluation: Calculating metrics", total=
+        with self.pbar(message="Evaluation: Calculating metrics", total=5) as p:
             self.eval_data = calculate_metrics(
-                self.cocoGt,
+                self.cocoGt,
+                self.cocoDt,
+                iouType="segm",
+                progress_cb=p.update,
+                evaluation_params=self.evaluation_params,
             )
         self._dump_eval_results()
 
+    @classmethod
+    def validate_evaluation_params(cls, evaluation_params: dict) -> None:
+        iou_threshold = evaluation_params.get("iou_threshold")
+        if iou_threshold is not None:
+            assert iou_threshold in [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], (
+                f"iou_threshold must be one of [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], "
+                f"but got {iou_threshold}"
+            )
+
     def _convert_to_coco(self):
         cocoGt_json = sly2coco(
             self.gt_project_path,
supervisely/nn/benchmark/evaluation/object_detection_evaluator.py
CHANGED

@@ -4,9 +4,12 @@ from supervisely.io.json import dump_json_file
 from supervisely.nn.benchmark.coco_utils import read_coco_datasets, sly2coco
 from supervisely.nn.benchmark.evaluation import BaseEvaluator
 from supervisely.nn.benchmark.evaluation.coco import calculate_metrics
+from pathlib import Path
 
 
 class ObjectDetectionEvaluator(BaseEvaluator):
+    EVALUATION_PARAMS_YAML_PATH = f"{Path(__file__).parent}/coco/evaluation_params.yaml"
+
     def evaluate(self):
         try:
             self.cocoGt_json, self.cocoDt_json = self._convert_to_coco()
@@ -17,12 +20,25 @@ class ObjectDetectionEvaluator(BaseEvaluator):
             "try to use newer version of NN app."
         )
         self.cocoGt, self.cocoDt = read_coco_datasets(self.cocoGt_json, self.cocoDt_json)
-        with self.pbar(message="Evaluation: Calculating metrics", total=
+        with self.pbar(message="Evaluation: Calculating metrics", total=5) as p:
             self.eval_data = calculate_metrics(
-                self.cocoGt,
+                self.cocoGt,
+                self.cocoDt,
+                iouType="bbox",
+                progress_cb=p.update,
+                evaluation_params=self.evaluation_params,
             )
         self._dump_eval_results()
 
+    @classmethod
+    def validate_evaluation_params(cls, evaluation_params: dict) -> None:
+        iou_threshold = evaluation_params.get("iou_threshold")
+        if iou_threshold is not None:
+            assert iou_threshold in [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], (
+                f"iou_threshold must be one of [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95], "
+                f"but got {iou_threshold}"
+            )
+
     def _convert_to_coco(self):
         cocoGt_json = sly2coco(
             self.gt_project_path,
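Both evaluators share this whitelist check, so an off-grid threshold fails fast before any COCO evaluation starts. For example:

from supervisely.nn.benchmark.evaluation.object_detection_evaluator import (
    ObjectDetectionEvaluator,
)

ObjectDetectionEvaluator.validate_evaluation_params({"iou_threshold": 0.75})  # passes
ObjectDetectionEvaluator.validate_evaluation_params({"iou_threshold": 0.33})  # AssertionError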
supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py
CHANGED

@@ -62,7 +62,7 @@ class SpeedtestOverview(MetricVis):
         res["columns"] = columns
         res["columnsOptions"] = columns_options
 
-        widget.main_column =
+        widget.main_column = "Batch size"
         widget.fixed_columns = 1
         widget.show_header_controls = False
         return res
supervisely/nn/benchmark/visualization/vis_metrics/overview.py
CHANGED

@@ -15,12 +15,13 @@ class Overview(MetricVis):
     def __init__(self, loader: Visualizer) -> None:
         super().__init__(loader)
         self._is_overview = True
-        info = loader.inference_info
+        info = loader.inference_info or {}
         url = info.get("checkpoint_url")
         link_text = info.get("custom_checkpoint_path")
         if link_text is None:
             link_text = url
-        link_text = link_text.replace("_", "\_")
+        if link_text is not None:
+            link_text = link_text.replace("_", "\_")
 
         # Note about validation dataset
         classes_str, note_about_val_dataset, train_session = self.get_overview_info()
@@ -65,6 +66,7 @@ class Overview(MetricVis):
                    self._loader.vis_texts.definitions.confidence_score,
                ],
            ),
+           table_key_metrics=Widget.Table(),
            chart=Widget.Chart(),
        )
 
@@ -117,7 +119,7 @@ class Overview(MetricVis):
         return fig
 
     def get_overview_info(self):
-        classes_cnt = len(self._loader._benchmark.classes_whitelist)
+        classes_cnt = len(self._loader._benchmark.classes_whitelist or [])
         classes_str = "classes" if classes_cnt > 1 else "class"
         classes_str = f"{classes_cnt} {classes_str}"
@@ -158,3 +160,30 @@ class Overview(MetricVis):
         images_str += f". Evaluated on the whole project ({val_imgs_cnt} images)"
 
         return classes_str, images_str, train_session
+
+    def get_table(self, widget: Widget.Table) -> dict:
+        res = {}
+
+        columns = ["metrics", "values"]
+        res["content"] = []
+        for metric, value in self._loader.mp.metric_table().items():
+            row = [metric, round(value, 2)]
+            dct = {
+                "row": row,
+                "id": metric,
+                "items": row,
+            }
+            res["content"].append(dct)
+
+        columns_options = [
+            {"customCell": True, "disableSort": True},
+            {"disableSort": True},
+        ]
+
+        res["columns"] = columns
+        res["columnsOptions"] = columns_options
+
+        widget.main_column = columns[0]
+        widget.show_header_controls = False
+        widget.width = "60%"
+        return res
supervisely/nn/benchmark/visualization/vis_templates.py
CHANGED

@@ -177,7 +177,7 @@ template_gallery_str = """<sly-iw-gallery
 
 
 template_table_str = """
-<div style="margin-top: 20px; margin-bottom: 30px;">
+<div style="margin-top: 20px; margin-bottom: 30px; {{ width }}">
    <sly-iw-table
        iw-widget-id="{{ widget_id }}"
        {% if clickable %}
@@ -208,11 +208,17 @@ template_table_str = """
            slot-scope="{ row, column, cellValue }"
          >
            <div
-              v-if="column === '{{ mainColumn }}'"
+              v-if="column === ' ' && '{{ mainColumn }}' === 'Batch size'"
              class="fflex"
            >
              <b>Batch size {{ '{{ cellValue }}' }}</b>
            </div>
+            <div
+              v-if="column === '{{ mainColumn }}'"
+              class="fflex"
+            >
+              <b>{{ '{{ cellValue }}' }}</b>
+            </div>
          </span>
        </sly-iw-table>
 </div>
supervisely/nn/benchmark/visualization/visualizer.py
CHANGED

@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import os
 import json
 import pickle
 from typing import TYPE_CHECKING, Dict, List, Tuple
@@ -155,6 +156,7 @@ class Visualizer:
             cocoDt,
         )
         self.mp.calculate()
+        self._dump_key_metrics()
 
         self.df_score_profile = pd.DataFrame(
             self.mp.confidence_score_profile(), columns=["scores", "precision", "recall", "f1"]
@@ -187,6 +189,8 @@ class Visualizer:
 
         initialized = [mv(self) for mv in ALL_METRICS]
         if self.speedtest is not None:
+            if len(self.speedtest["speedtest"]) < 2:
+                SPEEDTEST_METRICS.pop()
             initialized = initialized + [mv(self) for mv in SPEEDTEST_METRICS]
         initialized = [mv for mv in initialized if self.cv_task.value in mv.cv_tasks]
         with self.pbar(
@@ -351,6 +355,13 @@ class Visualizer:
             json.dump(self._generate_state(metric_visualizations), f)
         logger.info("Saved: %r", "state.json")
 
+    def _dump_key_metrics(self):
+        key_metrics = self.mp.json_metrics()
+        path = os.path.join(self._benchmark.get_base_dir(), "evaluation", "key_metrics.json")
+        with open(path, "w", encoding="utf-8") as f:
+            json.dump(key_metrics, f)
+        return path
+
     def update_diff_annotations(self):
         meta = self._update_pred_meta_with_tags(self.dt_project_info.id, self.dt_project_meta)
         self._update_diff_meta(meta)
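`_dump_key_metrics` writes a machine-readable summary next to the other evaluation artifacts; its keys mirror `MetricProvider.json_metrics()` (mAP, AP50, AP75, the custom AP at the configured IoU, f1, precision, recall, per-class AP maps, and calibration metrics). A sketch of reading it back (the relative path is illustrative; the real file lives under the benchmark's base directory):

import json

with open("evaluation/key_metrics.json") as f:
    key_metrics = json.load(f)

print(key_metrics["mAP"], key_metrics["AP50"], key_metrics["AP_by_class"])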
{supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/RECORD
CHANGED

@@ -553,11 +553,11 @@ supervisely/cli/teamfiles/teamfiles_upload.py,sha256=xnsW2rvdq1e-KGjF1tMBu7Oxh3n
 supervisely/collection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/collection/key_indexed_collection.py,sha256=x2UVlkprspWhhae9oLUzjTWBoIouiWY9UQSS_MozfH0,37643
 supervisely/collection/str_enum.py,sha256=Zp29yFGvnxC6oJRYNNlXhO2lTSdsriU1wiGHj6ahEJE,1250
-supervisely/convert/__init__.py,sha256=
-supervisely/convert/base_converter.py,sha256=
-supervisely/convert/converter.py,sha256=
+supervisely/convert/__init__.py,sha256=gRTV93OYJPI3FNfy78HO2SfR59qQ3FFKFSy_jw1adJ8,2571
+supervisely/convert/base_converter.py,sha256=F5oCwQZ_w7XlIIQnW-y-2scvA9sC8vCJL2XMP3DtPZg,14343
+supervisely/convert/converter.py,sha256=0F93xZmyprua63C6KxIeh0AbaFab81LiWqlkOVrkaYU,8672
 supervisely/convert/image/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/convert/image/image_converter.py,sha256=
+supervisely/convert/image/image_converter.py,sha256=Er-QX6qyVO4p72iJ3ae0YMF9gSfm_SUimoH6id0MIIs,9446
 supervisely/convert/image/image_helper.py,sha256=21cpHiSzHS7brQpP15MAkrGh1lj7pOWh9Z976cObyIw,3145
 supervisely/convert/image/cityscapes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/convert/image/cityscapes/cityscapes_converter.py,sha256=msmsR2W-Xiod06dwn-MzmkbrEmQQqlKh7zyfTrW6YQw,7854
@@ -569,6 +569,9 @@ supervisely/convert/image/coco/coco_helper.py,sha256=ZjHlQIF7RReNROVGo5ZUANGv5iR
 supervisely/convert/image/csv/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/convert/image/csv/csv_converter.py,sha256=iLyc2PAVtlsAq7blnGH4iS1_D7Ai6-4UsdI_RlDVB9Q,11677
 supervisely/convert/image/csv/csv_helper.py,sha256=-nR192IfMU0vTlNRoKXu5FS6tTs9fENqySyeKKyemRs,8409
+supervisely/convert/image/high_color/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+supervisely/convert/image/high_color/high_color_depth.py,sha256=t1LsOnEwhSmXUWxs2K7e_EHit5H0132_UibtFIt1P_w,5276
+supervisely/convert/image/high_color/high_color_helper.py,sha256=bD5Jl6lmKbiNG_8Bpy-WvIYjSJqwdaVmXrHGVZT6o2w,1630
 supervisely/convert/image/label_me/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/convert/image/label_me/label_me_converter.py,sha256=E8epO8METLslVLy51AW7FoZto1jrPSFUZHDwzOnHbTk,3081
 supervisely/convert/image/label_me/label_me_helper.py,sha256=_JO-IS2iiGVcWBHFoC8-LDBW4deJ_Rt4s3Sl5rFjXFg,8279
@@ -718,7 +721,7 @@ supervisely/nn/artifacts/unet.py,sha256=Gn8ADfwC4F-MABVDPRY7g_ZaAIaaOAEbhhIGII-o
 supervisely/nn/artifacts/yolov5.py,sha256=6KDCyDlLO7AT9of1qHjCaG5mmxCv6C0p-zCk9KJ0PH4,1478
 supervisely/nn/artifacts/yolov8.py,sha256=c3MzbOTYD6RT5N4F9oZ0SWXxyonjJ6ZQfZLYUHPRZg4,1204
 supervisely/nn/benchmark/__init__.py,sha256=RxqbBx7cbzookq2DRvxYIaRofON9uxHeY5h8DqDbZq0,187
-supervisely/nn/benchmark/base_benchmark.py,sha256=
+supervisely/nn/benchmark/base_benchmark.py,sha256=LoDsT_F86Y9xztrTyfz74FmT619_rrZVUKnEtTzav0A,22755
 supervisely/nn/benchmark/cv_tasks.py,sha256=ShoAbuNzfMYj0Se-KOnl_-dJnrmvN6Aukxa0eq28bFw,239
 supervisely/nn/benchmark/instance_segmentation_benchmark.py,sha256=9iiWEH7KDw7ps0mQQdzIrCtCKg4umHekF3ws7jIGjmE,938
 supervisely/nn/benchmark/object_detection_benchmark.py,sha256=s1S-L952etgz-UsDPyg69AgmFfAoJXvFHhITT8zB5iw,956
@@ -727,23 +730,24 @@ supervisely/nn/benchmark/coco_utils/__init__.py,sha256=MKxuzzBWpRCwR8kOb5NXUK8vD
 supervisely/nn/benchmark/coco_utils/sly2coco.py,sha256=iudlcHNynthscH-V5qwCLk6VgIcxYrMEuAfGIjrOjZ0,6867
 supervisely/nn/benchmark/coco_utils/utils.py,sha256=J9kM_Cn4XxfsrSQ8Rx6eb1UsS65-wOybaCkI9rQDeiU,504
 supervisely/nn/benchmark/evaluation/__init__.py,sha256=1NGV_xEGe9lyPdE5gJ8AASKzm2WyZ_jKlh9WVvCQIaY,287
-supervisely/nn/benchmark/evaluation/base_evaluator.py,sha256=
-supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py,sha256=
-supervisely/nn/benchmark/evaluation/object_detection_evaluator.py,sha256=
+supervisely/nn/benchmark/evaluation/base_evaluator.py,sha256=htei2QGHsx-1DO16j-lUjflmUFpfSIv0drhGfe60qAU,1995
+supervisely/nn/benchmark/evaluation/instance_segmentation_evaluator.py,sha256=p2VC4WXB2RQlmeRt14u74QTXSOwhkyQPCznw7Kqe32k,3773
+supervisely/nn/benchmark/evaluation/object_detection_evaluator.py,sha256=0Sp0h9kNQUQYVRL3WrV9Vm7OniJPMYriEFbwcCejOBs,3615
 supervisely/nn/benchmark/evaluation/coco/__init__.py,sha256=l6dFxp9aenywosQzQkIaDEI1p-DDQ63OgJJXxSVB4Mk,172
-supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py,sha256=
-supervisely/nn/benchmark/evaluation/coco/
+supervisely/nn/benchmark/evaluation/coco/calculate_metrics.py,sha256=Pe5_bXJ57343PQ0TuYEkCMNUyp-YTyIXnPXUESuXQBk,11430
+supervisely/nn/benchmark/evaluation/coco/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
+supervisely/nn/benchmark/evaluation/coco/metric_provider.py,sha256=sEHY9mob6fjKG4_deMxu3lVGTNXhqbRDoFerBEwH5a0,19907
 supervisely/nn/benchmark/evaluation/coco/metrics.py,sha256=oyictdJ7rRDUkaVvHoxntywW5zZweS8pIJ1bN6JgXtE,2420
 supervisely/nn/benchmark/visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/vis_click_data.py,sha256=4QdBZqJmmPYdcB7x565zOtXhDFRyXIB4tpu0V-_otoc,3724
-supervisely/nn/benchmark/visualization/vis_metric_base.py,sha256=
-supervisely/nn/benchmark/visualization/vis_templates.py,sha256=
-supervisely/nn/benchmark/visualization/vis_widgets.py,sha256=
-supervisely/nn/benchmark/visualization/visualizer.py,sha256=
+supervisely/nn/benchmark/visualization/vis_metric_base.py,sha256=hXnbGAijnZ700GuzfvaHBxU5elQR0wXkBUNbmcSWCno,13941
+supervisely/nn/benchmark/visualization/vis_templates.py,sha256=Vy019K8G8oJ9vN35tvsSjYA21xdldqIP-BALGEuy_eM,10169
+supervisely/nn/benchmark/visualization/vis_widgets.py,sha256=oavMM2Z-05Hp_Fj086NgXAqDq2KPAqXfT-nJb5qlDsg,4103
+supervisely/nn/benchmark/visualization/visualizer.py,sha256=tmx1coNpKv9dDmJb-lHhdjuiSq0oRnFC0GvSHwcXV4E,32149
 supervisely/nn/benchmark/visualization/inference_speed/__init__.py,sha256=6Nahwt9R61_Jc1eWupXa70CgyRQ7tbUeiDWR26017rY,554
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_batch.py,sha256=73gbXs1uTfxxWH-UCJdR72m-48jMD5qVyMyolf5jNoc,6140
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_intro.py,sha256=ivUVriKyhx9ZtwVSqrAkUqq1SJGYYxNLwLQR1UgE4aM,900
-supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py,sha256=
+supervisely/nn/benchmark/visualization/inference_speed/speedtest_overview.py,sha256=QLgiiHJzmasnNmn6OGWfLef01gLOiM84uVBK5P8c954,4887
 supervisely/nn/benchmark/visualization/inference_speed/speedtest_real_time.py,sha256=bVpNS3YBP0TGsqE_XQBuFMJI5ybDM0RZpEzFyT7cbkA,2157
 supervisely/nn/benchmark/visualization/text_templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/text_templates/inference_speed_text.py,sha256=XGeBrbP-ROyKYbqYZzA281_IG45Ygu9NKyqG2I3o5TU,1124
@@ -762,7 +766,7 @@ supervisely/nn/benchmark/visualization/vis_metrics/model_predictions.py,sha256=3
 supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts.py,sha256=rsm_hdE0pYCHY-5v0pjDIid71y2tPbzYbmH2Qw-RS-4,3983
 supervisely/nn/benchmark/visualization/vis_metrics/outcome_counts_per_class.py,sha256=lSb2-jfplyERIUCi8_6P9aq6C77JGOKOJK20J824sEE,5623
 supervisely/nn/benchmark/visualization/vis_metrics/overall_error_analysis.py,sha256=YHfueea2EkUgNGP4FCyKyCaCtCwaYeYNJ3WwfF-Hzi4,3553
-supervisely/nn/benchmark/visualization/vis_metrics/overview.py,sha256=
+supervisely/nn/benchmark/visualization/vis_metrics/overview.py,sha256=V-uNrtNhR5idywyfFSNOA4zFesTf5d6i2g8MNtLOhIw,6997
 supervisely/nn/benchmark/visualization/vis_metrics/percision_avg_per_class.py,sha256=mm8IVM90EoIC_9GsiM-Jyhh6jPqQcHMo788VAvRAzMY,1877
 supervisely/nn/benchmark/visualization/vis_metrics/pr_curve.py,sha256=4-AwEQk1ywuW4zXO_EXo7_aFMjenwhnLlGX2PWqiu0k,3574
 supervisely/nn/benchmark/visualization/vis_metrics/pr_curve_by_class.py,sha256=9Uuibo38HVGPChPbCW8i3cMYdb6-NFlys1TBisp5zOU,1442
@@ -909,8 +913,8 @@ supervisely/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 supervisely/task/paths.py,sha256=Zk9zPhuJuq2eZnb8rLt1ED6sbwX6ikHYRSZQvDP0OBg,1197
 supervisely/task/progress.py,sha256=C7E9rDNnNYJezjmjCJPL1oXqH-LVm15p3OLetvapWaE,27667
 supervisely/task/task_logger.py,sha256=_uDfqEtiwetga6aDAqcTKCKqHjZJSuUXTsHSQFNxAvI,3531
-supervisely/team_files/__init__.py,sha256=
-supervisely/team_files/team_files_path.py,sha256=
+supervisely/team_files/__init__.py,sha256=mtz0Z7eFvsykOcEnSVZ-5bNHsq0_YsNBjZK3qngO-DY,149
+supervisely/team_files/team_files_path.py,sha256=dSqz-bycImQYwAs62TD1zCD1NQdysqCQIBhEVh9EDjw,177
 supervisely/user/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/user/user.py,sha256=4GSVIupPAxWjIxZmUtH3Dtms_vGV82-49kM_aaR2gBI,319
 supervisely/video/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -953,9 +957,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.205.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.205.dist-info/METADATA,sha256=
-supervisely-6.73.205.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-supervisely-6.73.205.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.205.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.205.dist-info/RECORD,,
+supervisely-6.73.207.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.207.dist-info/METADATA,sha256=jPDiqTxPbkSOwBCqz4V6xiMiBRiQ-n5fB6ZxFAaBYVw,33077
+supervisely-6.73.207.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+supervisely-6.73.207.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.207.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.207.dist-info/RECORD,,
{supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/LICENSE
File without changes

{supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/WHEEL
File without changes

{supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/entry_points.txt
File without changes

{supervisely-6.73.205.dist-info → supervisely-6.73.207.dist-info}/top_level.txt
File without changes