supervisely 6.73.250__py3-none-any.whl → 6.73.252__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of supervisely has been flagged as potentially problematic.
- supervisely/api/dataset_api.py +17 -1
- supervisely/api/project_api.py +4 -1
- supervisely/api/volume/volume_annotation_api.py +7 -4
- supervisely/app/widgets/experiment_selector/experiment_selector.py +16 -8
- supervisely/nn/benchmark/base_benchmark.py +17 -2
- supervisely/nn/benchmark/base_evaluator.py +28 -6
- supervisely/nn/benchmark/instance_segmentation/benchmark.py +1 -1
- supervisely/nn/benchmark/instance_segmentation/evaluator.py +14 -0
- supervisely/nn/benchmark/object_detection/benchmark.py +1 -1
- supervisely/nn/benchmark/object_detection/evaluator.py +43 -13
- supervisely/nn/benchmark/object_detection/metric_provider.py +7 -0
- supervisely/nn/benchmark/semantic_segmentation/evaluator.py +33 -7
- supervisely/nn/benchmark/utils/detection/utlis.py +6 -4
- supervisely/nn/experiments.py +23 -16
- supervisely/nn/inference/gui/serving_gui_template.py +2 -35
- supervisely/nn/inference/inference.py +71 -8
- supervisely/nn/training/__init__.py +2 -0
- supervisely/nn/training/gui/classes_selector.py +14 -14
- supervisely/nn/training/gui/gui.py +28 -13
- supervisely/nn/training/gui/hyperparameters_selector.py +90 -41
- supervisely/nn/training/gui/input_selector.py +8 -6
- supervisely/nn/training/gui/model_selector.py +7 -5
- supervisely/nn/training/gui/train_val_splits_selector.py +8 -9
- supervisely/nn/training/gui/training_logs.py +17 -17
- supervisely/nn/training/gui/training_process.py +41 -36
- supervisely/nn/training/loggers/__init__.py +22 -0
- supervisely/nn/training/loggers/base_train_logger.py +8 -5
- supervisely/nn/training/loggers/tensorboard_logger.py +4 -11
- supervisely/nn/training/train_app.py +276 -90
- {supervisely-6.73.250.dist-info → supervisely-6.73.252.dist-info}/METADATA +8 -3
- {supervisely-6.73.250.dist-info → supervisely-6.73.252.dist-info}/RECORD +35 -35
- {supervisely-6.73.250.dist-info → supervisely-6.73.252.dist-info}/LICENSE +0 -0
- {supervisely-6.73.250.dist-info → supervisely-6.73.252.dist-info}/WHEEL +0 -0
- {supervisely-6.73.250.dist-info → supervisely-6.73.252.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.250.dist-info → supervisely-6.73.252.dist-info}/top_level.txt +0 -0
supervisely/api/dataset_api.py
CHANGED
@@ -263,6 +263,21 @@ class DatasetApi(UpdateableModule, RemoveableModuleApi):
             raise KeyError(f"Dataset with id={id} not found in your account")
         return info
 
+    def _get_effective_new_name(
+        self, project_id: int, name: str, change_name_if_conflict: bool, parent_id: int = None
+    ):
+        return (
+            self._get_free_name(
+                exist_check_fn=lambda name: self.get_info_by_name(
+                    project_id, name, parent_id=parent_id
+                )
+                is not None,
+                name=name,
+            )
+            if change_name_if_conflict
+            else name
+        )
+
     def create(
         self,
         project_id: int,
@@ -306,9 +321,10 @@ class DatasetApi(UpdateableModule, RemoveableModuleApi):
             print(len(new_ds_info)) # 2
         """
         effective_name = self._get_effective_new_name(
-
+            project_id=project_id,
             name=name,
             change_name_if_conflict=change_name_if_conflict,
+            parent_id=parent_id,
         )
         response = self._api.post(
             "datasets.add",
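The new `_get_effective_new_name` helper centralizes the rename-on-conflict logic and now threads `parent_id` through, so name collisions are checked inside the correct parent dataset. Below is a minimal, self-contained sketch of the same pattern; the function and the `existing` set are illustrative stand-ins, and how `_get_free_name` actually picks the next free name is an assumption, not the package's implementation.

# Standalone sketch of the conflict-resolution pattern behind _get_effective_new_name.
# `existing` stands in for what get_info_by_name(project_id, name, parent_id=...) would find.
def get_effective_new_name(name, existing, change_name_if_conflict):
    """Return `name` unchanged, or `name_1`, `name_2`, ... if it already exists."""
    if not change_name_if_conflict:
        return name
    candidate, suffix = name, 1
    while candidate in existing:          # plays the role of exist_check_fn
        candidate = f"{name}_{suffix}"    # assumed suffixing strategy of _get_free_name
        suffix += 1
    return candidate

print(get_effective_new_name("ds", {"ds", "ds_1"}, True))   # -> ds_2
print(get_effective_new_name("ds", {"ds"}, False))          # -> ds (caller keeps the conflicting name)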
supervisely/api/project_api.py
CHANGED
@@ -95,6 +95,7 @@ class ProjectInfo(NamedTuple):
     settings: dict
     import_settings: dict
     version: dict
+    created_by_id: int
 
     @property
     def image_preview_url(self):
@@ -169,6 +170,7 @@ class ProjectApi(CloneableModuleApi, UpdateableModule, RemoveableModuleApi):
                 backup_archive={},
                 team_id=2,
                 import_settings={}
+                version={'id': 260, 'version': 3}
             )
         """
         return [
@@ -191,6 +193,7 @@ class ProjectApi(CloneableModuleApi, UpdateableModule, RemoveableModuleApi):
             ApiField.SETTINGS,
             ApiField.IMPORT_SETTINGS,
             ApiField.VERSION,
+            ApiField.CREATED_BY_ID,
         ]
 
     @staticmethod
@@ -427,7 +430,7 @@ class ProjectApi(CloneableModuleApi, UpdateableModule, RemoveableModuleApi):
         """
 
         fields = [
-            x for x in self.info_sequence() if x not in (ApiField.ITEMS_COUNT, ApiField.SETTINGS)
+            x for x in self.info_sequence() if x not in (ApiField.ITEMS_COUNT, ApiField.SETTINGS, ApiField.CREATED_BY_ID)
         ]
 
         info = super().get_info_by_name(parent_id, name, fields)
supervisely/api/volume/volume_annotation_api.py
CHANGED
@@ -128,7 +128,9 @@ class VolumeAnnotationAPI(EntityAnnotationAPI):
         volume_info = self._api.volume.get_info_by_id(volume_id)
         return self._download(volume_info.dataset_id, volume_id)
 
-    def append(
+    def append(
+        self, volume_id: int, ann: VolumeAnnotation, key_id_map: KeyIdMap = None, volume_info=None
+    ):
         """
         Loads VolumeAnnotation to a given volume ID in the API.
 
@@ -159,13 +161,14 @@ class VolumeAnnotationAPI(EntityAnnotationAPI):
         else:
             figures = ann.figures
 
-
+        if volume_info is None:
+            volume_info = self._api.volume.get_info_by_id(volume_id)
         self._append(
             self._api.volume.tag,
             self._api.volume.object,
             self._api.volume.figure,
-
-
+            volume_info.project_id,
+            volume_info.dataset_id,
             volume_id,
             ann.tags,
             ann.objects,
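`append` now accepts an optional, pre-fetched `volume_info`, so callers that already hold the `VolumeInfo` can skip a second `volume.get_info_by_id` request; when omitted, the info is fetched internally as before. A toy sketch of this optional pre-fetched-argument pattern follows; `fetch_info` and the dict shape are hypothetical stand-ins, not the Supervisely API.

# Sketch only: demonstrates how passing a pre-fetched info object avoids a duplicate lookup.
calls = {"n": 0}

def fetch_info(volume_id):
    calls["n"] += 1                      # counts simulated API requests
    return {"id": volume_id, "project_id": 7, "dataset_id": 42}

def append(volume_id, ann, volume_info=None):
    if volume_info is None:              # old behaviour: always one extra request
        volume_info = fetch_info(volume_id)
    return (volume_info["project_id"], volume_info["dataset_id"], ann)

info = fetch_info(5)
append(5, "ann-a", volume_info=info)     # reuses the info the caller already has
append(5, "ann-b")                       # falls back to fetching it internally
print(calls["n"])                        # -> 2 requests instead of 3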
supervisely/app/widgets/experiment_selector/experiment_selector.py
CHANGED
@@ -1,6 +1,7 @@
 import os
 from collections import defaultdict
 from concurrent.futures import ThreadPoolExecutor, as_completed
+from dataclasses import asdict
 from typing import Any, Callable, Dict, List, Union
 
 from supervisely import env, logger
@@ -91,7 +92,7 @@ class ExperimentSelector(Widget):
         self._session_link = self._generate_session_link()
 
         # col 6 benchmark report
-        self.
+        self._benchmark_report_id = experiment_info.evaluation_report_id
 
         # widgets
         self._task_widget = self._create_task_widget()
@@ -191,7 +192,7 @@ class ExperimentSelector(Widget):
         task_widget = Container(
             [
                 Text(
-                    f"<i class='zmdi zmdi-folder' style='color: #7f858e'></i> <a href='{self._task_link}'>{self._task_id}</a>",
+                    f"<i class='zmdi zmdi-folder' style='color: #7f858e'></i> <a href='{self._task_link}' target='_blank'>{self._task_id}</a>",
                     "text",
                 ),
                 Text(
@@ -237,22 +238,29 @@ class ExperimentSelector(Widget):
 
     def _create_session_widget(self) -> Text:
         session_link_widget = Text(
-            f"<a href='{self._session_link}'>Preview</a> <i class='zmdi zmdi-open-in-new'></i>",
+            f"<a href='{self._session_link}' target='_blank'>Preview</a> <i class='zmdi zmdi-open-in-new'></i>",
            "text",
        )
        return session_link_widget
 
    def _create_benchmark_widget(self) -> Text:
-        if self.
-            self.
+        if self._benchmark_report_id is None:
+            self._benchmark_report_id = "No evaluation report available"
            benchmark_widget = Text(
-                "<span class='field-description text-muted' style='color: #7f858e'>No
+                "<span class='field-description text-muted' style='color: #7f858e'>No evaluation report available</span>",
                "text",
                font_size=13,
            )
        else:
+            if is_development():
+                benchmark_report_link = abs_url(
+                    f"/model-benchmark?id={self._benchmark_report_id}"
+                )
+            else:
+                benchmark_report_link = f"/model-benchmark?id={self._benchmark_report_id}"
+
            benchmark_widget = Text(
-                f"<
+                f"<i class='zmdi zmdi-chart' style='color: #7f858e'></i> <a href='{benchmark_report_link}' target='_blank'>evaluation report</a>",
                "text",
            )
        return benchmark_widget
@@ -432,7 +440,7 @@ class ExperimentSelector(Widget):
        if len(self._rows) == 0:
            return
        selected_row = self.get_selected_row()
-        selected_row_json = selected_row._experiment_info
+        selected_row_json = asdict(selected_row._experiment_info)
        return selected_row_json
 
    def get_selected_checkpoint_path(self) -> str:
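The selected row is now converted with `dataclasses.asdict` because `ExperimentInfo` became a dataclass (see supervisely/nn/experiments.py below) and no longer serializes like a NamedTuple. A small sketch with a reduced, hypothetical stand-in class:

# Sketch only: ExperimentInfoSketch is a trimmed stand-in, not the real ExperimentInfo.
from dataclasses import asdict, dataclass
from typing import Optional

@dataclass
class ExperimentInfoSketch:
    experiment_name: str
    task_id: int
    evaluation_report_id: Optional[int] = None

row = ExperimentInfoSketch("rtdetr-coco", 123)
print(asdict(row))  # {'experiment_name': 'rtdetr-coco', 'task_id': 123, 'evaluation_report_id': None}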
supervisely/nn/benchmark/base_benchmark.py
CHANGED
@@ -74,6 +74,8 @@ class BaseBenchmark:
         self.train_info = None
         self.evaluator_app_info = None
         self.evaluation_params = evaluation_params
+        self._eval_results = None
+        self.report_id = None
         self._validate_evaluation_params()
 
     def _get_evaluator_class(self) -> type:
@@ -87,6 +89,11 @@ class BaseBenchmark:
     def hardware(self) -> str:
         return self._hardware
 
+    @property
+    def key_metrics(self):
+        eval_results = self.get_eval_result()
+        return eval_results.key_metrics
+
     def run_evaluation(
         self,
         model_session: Union[int, str, SessionJSON],
@@ -482,8 +489,10 @@ class BaseBenchmark:
                 f"Visualizer class is not defined in {self.__class__.__name__}. "
                 "It should be defined in the subclass of BaseBenchmark (e.g. ObjectDetectionBenchmark)."
             )
-        eval_result = self.
-        vis = self.visualizer_cls(
+        eval_result = self.get_eval_result()
+        vis = self.visualizer_cls(  # pylint: disable=not-callable
+            self.api, [eval_result], self.get_layout_results_dir(), self.pbar
+        )
         with self.pbar(message="Visualizations: Rendering layout", total=1) as p:
             vis.visualize()
             p.update(1)
@@ -566,6 +575,7 @@ class BaseBenchmark:
     def upload_report_link(self, remote_dir: str):
         template_path = os.path.join(remote_dir, "template.vue")
         vue_template_info = self.api.file.get_info_by_path(self.team_id, template_path)
+        self.report_id = vue_template_info.id
 
         report_link = "/model-benchmark?id=" + str(vue_template_info.id)
         local_path = os.path.join(self.get_layout_results_dir(), "open.lnk")
@@ -608,3 +618,8 @@ class BaseBenchmark:
             return sum(ds.items_count for ds in self.gt_dataset_infos)
         else:
             return self.gt_project_info.items_count
+
+    def get_eval_result(self):
+        if self._eval_results is None:
+            self._eval_results = self.evaluator.get_eval_result()
+        return self._eval_results
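`get_eval_result()` caches the evaluator's result object so the new `key_metrics` property and the visualizer reuse one instance instead of rebuilding it. A minimal sketch of that lazy-caching pattern (the class names are stand-ins, and the real property returns the result's `key_metrics` rather than the whole object):

# Sketch only: Evaluator and BenchmarkSketch are illustrative stand-ins.
class Evaluator:
    def get_eval_result(self):
        print("building eval result...")   # expensive in the real benchmark
        return {"mAP": 0.42}

class BenchmarkSketch:
    def __init__(self, evaluator):
        self.evaluator = evaluator
        self._eval_results = None

    def get_eval_result(self):
        if self._eval_results is None:                      # computed at most once
            self._eval_results = self.evaluator.get_eval_result()
        return self._eval_results

    @property
    def key_metrics(self):
        return self.get_eval_result()                       # real code returns .key_metrics

bm = BenchmarkSketch(Evaluator())
bm.key_metrics
bm.key_metrics                                              # "building eval result..." printed once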
supervisely/nn/benchmark/base_evaluator.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 import pickle
 from typing import Dict, List, Optional, Union
@@ -9,23 +11,34 @@ from supervisely.task.progress import tqdm_sly
 
 
 class BaseEvalResult:
-    def __init__(self, directory: str):
+    def __init__(self, directory: Optional[str] = None):
         self.directory = directory
         self.inference_info: Dict = None
         self.speedtest_info: Dict = None
         self.eval_data: Dict = None
         self.mp = None
 
-        self.
+        if self.directory is not None:
+            self._read_files(self.directory)
+            self._prepare_data()
+
+    @classmethod
+    def from_evaluator(cls, evaulator: BaseEvaluator) -> BaseEvalResult:
+        """Method to customize loading of the evaluation result."""
+        raise NotImplementedError()
 
     @property
     def cv_task(self):
         return self.inference_info.get("task_type")
 
     @property
-    def name(self) -> str:
-
-        return
+    def name(self) -> Union[str, None]:
+        deploy_params = self.inference_info.get("deploy_params", {})
+        return (
+            deploy_params.get("checkpoint_name")
+            or deploy_params.get("model_name")
+            or self.inference_info.get("model_name")
+        )
 
     @property
     def gt_project_id(self) -> int:
@@ -59,7 +72,16 @@ class BaseEvalResult:
     def classes_whitelist(self):
         return self.inference_info.get("inference_settings", {}).get("classes", [])  # TODO: check
 
-    def
+    def _read_files(self, path: str) -> None:
+        """Read all necessary files from the directory"""
+        raise NotImplementedError()
+
+    def _prepare_data(self) -> None:
+        """Prepare data to allow easy access to the data"""
+        raise NotImplementedError()
+
+    @property
+    def key_metrics(self):
         raise NotImplementedError()
 
 
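`BaseEvalResult` now supports two construction paths: passing a directory triggers `_read_files` and `_prepare_data`, while `from_evaluator` builds the result from in-memory evaluator state without touching disk. A condensed sketch of that pattern follows; the classes are stand-ins, not the package's implementations.

# Sketch only: EvalResultSketch / EvaluatorSketch illustrate the two construction paths.
class EvalResultSketch:
    def __init__(self, directory=None):
        self.directory = directory
        self.eval_data = None
        if directory is not None:        # path 1: load everything from a folder
            self._read_files(directory)
            self._prepare_data()

    def _read_files(self, path):
        self.eval_data = {"matches": f"loaded from {path}/eval_data.pkl"}

    def _prepare_data(self):
        self.prepared = True

    @classmethod
    def from_evaluator(cls, evaluator):  # path 2: reuse objects already in memory
        result = cls()
        result.eval_data = evaluator.eval_data
        result._prepare_data()
        return result

class EvaluatorSketch:
    eval_data = {"matches": "in-memory"}

print(EvalResultSketch("/tmp/eval").eval_data)
print(EvalResultSketch.from_evaluator(EvaluatorSketch()).eval_data)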
supervisely/nn/benchmark/instance_segmentation/benchmark.py
CHANGED
@@ -29,5 +29,5 @@ class InstanceSegmentationBenchmark(ObjectDetectionBenchmark):
         ):
             assert try_set_conf_auto(
                 self.session, CONF_THRES
-            ), f"Unable to set the confidence threshold to {CONF_THRES} for
+            ), f"Unable to set the confidence threshold to {CONF_THRES} for evaluation."
         return super()._run_inference(output_project_id, batch_size, cache_project_on_agent)
supervisely/nn/benchmark/instance_segmentation/evaluator.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 from pathlib import Path
 
@@ -13,6 +15,18 @@ from supervisely.nn.benchmark.utils import calculate_metrics, read_coco_datasets
 class InstanceSegmentationEvalResult(ObjectDetectionEvalResult):
     mp_cls = MetricProvider
 
+    @classmethod
+    def from_evaluator(
+        cls, evaulator: InstanceSegmentationEvaluator
+    ) -> InstanceSegmentationEvalResult:
+        """Method to customize loading of the evaluation result."""
+        eval_result = cls()
+        eval_result.eval_data = evaulator.eval_data
+        eval_result.coco_gt = evaulator.cocoGt
+        eval_result.coco_dt = evaulator.cocoDt
+        eval_result._prepare_data()
+        return eval_result
+
 
 class InstanceSegmentationEvaluator(ObjectDetectionEvaluator):
     EVALUATION_PARAMS_YAML_PATH = f"{Path(__file__).parent}/evaluation_params.yaml"
supervisely/nn/benchmark/object_detection/benchmark.py
CHANGED
@@ -27,5 +27,5 @@ class ObjectDetectionBenchmark(BaseBenchmark):
         ):
             assert try_set_conf_auto(
                 self.session, CONF_THRES
-            ), f"Unable to set the confidence threshold to {CONF_THRES} for
+            ), f"Unable to set the confidence threshold to {CONF_THRES} for evaluation."
         return super()._run_inference(output_project_id, batch_size, cache_project_on_agent)
supervisely/nn/benchmark/object_detection/evaluator.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 import pickle
 from pathlib import Path
@@ -18,25 +20,39 @@ from supervisely.nn.benchmark.visualization.vis_click_data import ClickData, IdM
 class ObjectDetectionEvalResult(BaseEvalResult):
     mp_cls = MetricProvider
 
-    def
-
+    def _read_files(self, path: str) -> None:
+        """Read all necessary files from the directory"""
+
+        self.coco_gt = Path(path) / "cocoGt.json"
+        self.coco_dt = Path(path) / "cocoDt.json"
 
-
-
-        coco_gt, coco_dt = COCO(gt_path), COCO(dt_path)
-        self.coco_gt = coco_gt
-        self.coco_dt = coco_dt
-        self.eval_data = None
-        with open(Path(self.directory, "eval_data.pkl"), "rb") as f:
-            self.eval_data = pickle.load(f)
+        if self.coco_gt.exists() and self.coco_dt.exists():
+            self.coco_gt, self.coco_dt = read_coco_datasets(self.coco_gt, self.coco_dt)
 
-
-
+        eval_data_path = Path(path) / "eval_data.pkl"
+        if eval_data_path.exists():
+            with open(Path(path, "eval_data.pkl"), "rb") as f:
+                self.eval_data = pickle.load(f)
 
-
+        inference_info_path = Path(path) / "inference_info.json"
+        if inference_info_path.exists():
+            self.inference_info = load_json_file(str(inference_info_path))
+
+        speedtest_info_path = Path(path).parent / "speedtest" / "speedtest.json"
         if speedtest_info_path.exists():
             self.speedtest_info = load_json_file(str(speedtest_info_path))
 
+    def _prepare_data(self) -> None:
+        """Prepare data to allow easy access to the most important parts"""
+
+        from pycocotools.coco import COCO  # pylint: disable=import-error
+
+        if not hasattr(self, "coco_gt") or not hasattr(self, "coco_dt"):
+            raise ValueError("GT and DT datasets are not provided")
+
+        if not isinstance(self.coco_gt, COCO) and not isinstance(self.coco_dt, COCO):
+            self.coco_gt, self.coco_dt = read_coco_datasets(self.coco_gt, self.coco_dt)
+
         self.mp = MetricProvider(
             self.eval_data["matches"],
             self.eval_data["coco_metrics"],
@@ -62,6 +78,20 @@ class ObjectDetectionEvalResult(BaseEvalResult):
 
         self.click_data = ClickData(self.mp.m, gt_id_mapper, dt_id_mapper)
 
+    @classmethod
+    def from_evaluator(cls, evaulator: ObjectDetectionEvaluator) -> ObjectDetectionEvalResult:
+        """Method to customize loading of the evaluation result."""
+        eval_result = cls()
+        eval_result.eval_data = evaulator.eval_data
+        eval_result.coco_gt = evaulator.cocoGt
+        eval_result.coco_dt = evaulator.cocoDt
+        eval_result._prepare_data()
+        return eval_result
+
+    @property
+    def key_metrics(self):
+        return self.mp.key_metrics()
+
 
 class ObjectDetectionEvaluator(BaseEvaluator):
     EVALUATION_PARAMS_YAML_PATH = f"{Path(__file__).parent}/evaluation_params.yaml"
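`_read_files` now treats every artifact as optional, guarding each read with an existence check instead of assuming the files are present. A small self-contained sketch of that guarded loading; the paths and the helper name are illustrative only, not the package's API.

# Sketch only: reads whatever artifacts exist in a results folder, leaving the rest as None.
import json
import pickle
from pathlib import Path

def read_optional_artifacts(directory: str) -> dict:
    root = Path(directory)
    out = {"eval_data": None, "inference_info": None}

    eval_data_path = root / "eval_data.pkl"
    if eval_data_path.exists():                      # missing file -> just leave None
        with open(eval_data_path, "rb") as f:
            out["eval_data"] = pickle.load(f)

    inference_info_path = root / "inference_info.json"
    if inference_info_path.exists():
        out["inference_info"] = json.loads(inference_info_path.read_text())

    return out

print(read_optional_artifacts("/tmp/does-not-exist"))   # {'eval_data': None, 'inference_info': None}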
supervisely/nn/benchmark/object_detection/metric_provider.py
CHANGED
@@ -164,6 +164,13 @@ class MetricProvider:
             f"AP{iou_name}_by_class": ap_custom_by_class,
         }
 
+    def key_metrics(self):
+        iou_name = int(self.iou_threshold * 100)
+        json_metrics = self.json_metrics()
+        json_metrics.pop("AP_by_class")
+        json_metrics.pop(f"AP{iou_name}_by_class")
+        return json_metrics
+
     def metric_table(self):
         table = self.json_metrics()
         iou_name = int(self.iou_threshold * 100)
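`key_metrics()` returns the scalar summary by dropping the per-class breakdowns from `json_metrics()`. The sketch below applies the same filtering to a made-up payload; unlike the real method, it works on a copy and uses a default for `pop`, so it never mutates its input.

# Sketch only: values and keys are invented to illustrate the filtering.
def key_metrics(json_metrics: dict, iou_threshold: float = 0.75) -> dict:
    iou_name = int(iou_threshold * 100)
    metrics = dict(json_metrics)             # copy instead of mutating the source dict
    metrics.pop("AP_by_class", None)
    metrics.pop(f"AP{iou_name}_by_class", None)
    return metrics

full = {"mAP": 0.41, "AP75": 0.45, "AP_by_class": {"cat": 0.5}, "AP75_by_class": {"cat": 0.55}}
print(key_metrics(full))                     # {'mAP': 0.41, 'AP75': 0.45}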
supervisely/nn/benchmark/semantic_segmentation/evaluator.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 import pickle
 import shutil
@@ -24,16 +26,40 @@ from supervisely.sly_logger import logger
 class SemanticSegmentationEvalResult(BaseEvalResult):
     mp_cls = MetricProvider
 
-    def
-
-
-
-
+    def _read_files(self, path: str) -> None:
+        """Read all necessary files from the directory"""
+
+        eval_data_path = Path(path) / "eval_data.pkl"
+        if eval_data_path.exists():
+            with open(Path(path, "eval_data.pkl"), "rb") as f:
+                self.eval_data = pickle.load(f)
+
+        inference_info_path = Path(path) / "inference_info.json"
+        if inference_info_path.exists():
+            self.inference_info = load_json_file(str(inference_info_path))
+
+        speedtest_info_path = Path(path).parent / "speedtest" / "speedtest.json"
         if speedtest_info_path.exists():
-            self.speedtest_info = load_json_file(speedtest_info_path)
+            self.speedtest_info = load_json_file(str(speedtest_info_path))
+
+    def _prepare_data(self) -> None:
+        """Prepare data to allow easy access to the most important parts"""
 
         self.mp = MetricProvider(self.eval_data)
-
+
+    @classmethod
+    def from_evaluator(
+        cls, evaulator: SemanticSegmentationEvaluator
+    ) -> SemanticSegmentationEvalResult:
+        """Method to customize loading of the evaluation result."""
+        eval_result = cls()
+        eval_result.eval_data = evaulator.eval_data
+        eval_result._prepare_data()
+        return eval_result
+
+    @property
+    def key_metrics(self):
+        return self.mp.json_metrics()
 
 
 class SemanticSegmentationEvaluator(BaseEvaluator):
supervisely/nn/benchmark/utils/detection/utlis.py
CHANGED
@@ -1,3 +1,5 @@
+from pathlib import Path
+
 from supervisely.io.json import load_json_file
 from supervisely.nn.inference import SessionJSON
 
@@ -15,10 +17,10 @@ def try_set_conf_auto(session: SessionJSON, conf: float):
 def read_coco_datasets(cocoGt_json, cocoDt_json):
     from pycocotools.coco import COCO  # pylint: disable=import-error
 
-    if isinstance(cocoGt_json, str):
-        cocoGt_json = load_json_file(cocoGt_json)
-    if isinstance(cocoDt_json, str):
-        cocoDt_json = load_json_file(cocoDt_json)
+    if isinstance(cocoGt_json, (str, Path)):
+        cocoGt_json = load_json_file(str(cocoGt_json))
+    if isinstance(cocoDt_json, (str, Path)):
+        cocoDt_json = load_json_file(str(cocoDt_json))
     cocoGt = COCO()
     cocoGt.dataset = cocoGt_json
     cocoGt.createIndex()
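`read_coco_datasets` now accepts `pathlib.Path` objects as well as plain strings, stringifying them before loading. A sketch of just that normalization step; `load_json` stands in for `load_json_file`, and the pycocotools part is omitted.

# Sketch only: normalizes a COCO payload given as str, Path, or an already-parsed dict.
import json
from pathlib import Path
from typing import Union

def load_json(path: str) -> dict:
    with open(path) as f:
        return json.load(f)

def normalize_coco_input(coco_json: Union[str, Path, dict]) -> dict:
    if isinstance(coco_json, (str, Path)):       # previously only plain str was accepted
        return load_json(str(coco_json))         # Path is stringified before loading
    return coco_json                             # already a parsed dict

print(normalize_coco_input({"images": [], "annotations": []}))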
supervisely/nn/experiments.py
CHANGED
@@ -1,15 +1,18 @@
 from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass, fields
 from json import JSONDecodeError
 from os.path import dirname, join
-from typing import Any, Dict, List
+from typing import Any, Dict, List
 
 import requests
 
 from supervisely import logger
 from supervisely.api.api import Api, ApiField
+from supervisely.api.file_api import FileInfo
 
 
-
+@dataclass
+class ExperimentInfo:
     experiment_name: str
     """Name of the experiment. Defined by the user in the training app"""
     framework_name: str
@@ -22,12 +25,14 @@ class ExperimentInfo(NamedTuple):
     """Project ID in Supervisely"""
     task_id: int
     """Task ID in Supervisely"""
-    model_files:
+    model_files: dict
     """Dictionary with paths to model files that needs to be downloaded for training"""
     checkpoints: List[str]
     """List of relative paths to checkpoints"""
     best_checkpoint: str
     """Name of the best checkpoint. Defined by the user in the training app"""
+    export: dict
+    """Dictionary with exported weights in different formats"""
     app_state: str
     """Path to file with settings that were used in the app"""
     model_meta: str
@@ -42,7 +47,7 @@ class ExperimentInfo(NamedTuple):
     """Date and time when the experiment was started"""
     evaluation_report_id: int
     """ID of the evaluation report"""
-
+    evaluation_metrics: dict
     """Evaluation metrics"""
 
 
@@ -74,7 +79,7 @@ def get_experiment_infos(api: Api, team_id: int, framework_name: str) -> List[Ex
     experiment_infos = []
 
     file_infos = api.file.list(team_id, experiments_folder, recursive=True, return_type="fileinfo")
-
+    sorted_experiment_paths = []
     for file_info in file_infos:
         if not file_info.path.endswith(metadata_name):
             continue
@@ -82,32 +87,34 @@ def get_experiment_infos(api: Api, team_id: int, framework_name: str) -> List[Ex
         experiment_dir = dirname(file_info.path)
         if experiment_dir.endswith(framework_name):
             experiment_path = join(experiment_dir, metadata_name)
-
+            sorted_experiment_paths.append(experiment_path)
 
-    def fetch_experiment_data(
+    def fetch_experiment_data(experiment_path: str):
         try:
             response = api.post(
                 "file-storage.download",
-                {ApiField.TEAM_ID: team_id, ApiField.PATH:
+                {ApiField.TEAM_ID: team_id, ApiField.PATH: experiment_path},
                 stream=True,
             )
             response.raise_for_status()
             response_json = response.json()
-            required_fields = {field for field in ExperimentInfo
-
-
-
-            )
+            required_fields = {field.name for field in fields(ExperimentInfo)}
+            missing_fields = required_fields - response_json.keys()
+            if missing_fields:
+                logger.debug(f"Missing fields: {missing_fields} for '{experiment_path}'")
                 return None
             return ExperimentInfo(**response_json)
         except requests.exceptions.RequestException as e:
-            logger.debug(f"
+            logger.debug(f"Request failed for '{experiment_path}': {e}")
         except JSONDecodeError as e:
-            logger.debug(f"
+            logger.debug(f"JSON decode failed for '{experiment_path}': {e}")
+        except TypeError as e:
+            logger.error(f"TypeError for '{experiment_path}': {e}")
         return None
 
+    # Error
     with ThreadPoolExecutor() as executor:
-        experiment_infos = list(executor.map(fetch_experiment_data,
+        experiment_infos = list(executor.map(fetch_experiment_data, sorted_experiment_paths))
 
     experiment_infos = [info for info in experiment_infos if info is not None]
     return experiment_infos
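`ExperimentInfo` is now a `@dataclass`, and `fetch_experiment_data` validates the downloaded JSON against `dataclasses.fields()` before constructing it, skipping incomplete entries. A reduced sketch of that validation; the stand-in class has only three fields, and unlike the real code the constructor call here filters the payload down to the required keys.

# Sketch only: shows how fields() drives the required-key check.
from dataclasses import dataclass, fields
from typing import Optional

@dataclass
class ExperimentInfoSketch:       # trimmed stand-in for the real ExperimentInfo
    experiment_name: str
    framework_name: str
    task_id: int

def build_info(payload: dict) -> Optional[ExperimentInfoSketch]:
    required = {f.name for f in fields(ExperimentInfoSketch)}
    missing = required - payload.keys()
    if missing:
        print(f"skipping entry, missing fields: {missing}")
        return None
    return ExperimentInfoSketch(**{k: payload[k] for k in required})

print(build_info({"experiment_name": "exp1", "framework_name": "rt-detr"}))              # -> None
print(build_info({"experiment_name": "exp1", "framework_name": "rt-detr", "task_id": 9}))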
supervisely/nn/inference/gui/serving_gui_template.py
CHANGED
@@ -30,7 +30,7 @@ class ServingGUITemplate(ServingGUI):
     def __init__(
         self,
         framework_name: str,
-        models: Optional[
+        models: Optional[list] = None,
         app_options: Optional[str] = None,
     ):
         if not isinstance(framework_name, str):
@@ -41,7 +41,7 @@ class ServingGUITemplate(ServingGUI):
         self.team_id = sly_env.team_id()
 
         self.framework_name = framework_name
-        self.models =
+        self.models = models
         self.app_options = self._load_app_options(app_options) if app_options else {}
 
         base_widgets = self._initialize_layout()
@@ -177,39 +177,6 @@ class ServingGUITemplate(ServingGUI):
             "runtime": self.runtime,
         }
 
-    # Loaders
-    def _load_models(self, models: str) -> List[Dict[str, Any]]:
-        """
-        Loads models from the provided file or list of model configurations.
-        """
-        if isinstance(models, str):
-            if sly_fs.file_exists(models) and sly_fs.get_file_ext(models) == ".json":
-                models = sly_json.load_json_file(models)
-            else:
-                raise ValueError("File not found or invalid file format.")
-        else:
-            raise ValueError(
-                "Invalid models file. Please provide a valid '.json' file with list of model configurations."
-            )
-
-        if not isinstance(models, list):
-            raise ValueError("models parameters must be a list of dicts")
-        for item in models:
-            if not isinstance(item, dict):
-                raise ValueError(f"Each item in models must be a dict.")
-            model_meta = item.get("meta")
-            if model_meta is None:
-                raise ValueError(
-                    "Model metadata not found. Please update provided models parameter to include key 'meta'."
-                )
-            model_files = model_meta.get("model_files")
-            if model_files is None:
-                raise ValueError(
-                    "Model files not found in model metadata. "
-                    "Please update provided models oarameter to include key 'model_files' in 'meta' key."
-                )
-        return models
-
     def _load_app_options(self, app_options: str = None) -> Dict[str, Any]:
         """
         Loads the app_options parameter to ensure it is in the correct format.