supervisely 6.73.419__py3-none-any.whl → 6.73.421__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/api/api.py +10 -5
- supervisely/api/app_api.py +71 -4
- supervisely/api/module_api.py +4 -0
- supervisely/api/nn/deploy_api.py +15 -9
- supervisely/api/nn/ecosystem_models_api.py +201 -0
- supervisely/api/nn/neural_network_api.py +12 -3
- supervisely/api/project_api.py +35 -6
- supervisely/api/task_api.py +5 -1
- supervisely/app/widgets/__init__.py +8 -1
- supervisely/app/widgets/agent_selector/template.html +1 -0
- supervisely/app/widgets/deploy_model/__init__.py +0 -0
- supervisely/app/widgets/deploy_model/deploy_model.py +729 -0
- supervisely/app/widgets/dropdown_checkbox_selector/__init__.py +0 -0
- supervisely/app/widgets/dropdown_checkbox_selector/dropdown_checkbox_selector.py +87 -0
- supervisely/app/widgets/dropdown_checkbox_selector/template.html +12 -0
- supervisely/app/widgets/ecosystem_model_selector/__init__.py +0 -0
- supervisely/app/widgets/ecosystem_model_selector/ecosystem_model_selector.py +190 -0
- supervisely/app/widgets/experiment_selector/experiment_selector.py +447 -264
- supervisely/app/widgets/fast_table/fast_table.py +402 -74
- supervisely/app/widgets/fast_table/script.js +364 -96
- supervisely/app/widgets/fast_table/style.css +24 -0
- supervisely/app/widgets/fast_table/template.html +43 -3
- supervisely/app/widgets/radio_table/radio_table.py +10 -2
- supervisely/app/widgets/select/select.py +6 -4
- supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +18 -0
- supervisely/app/widgets/tabs/tabs.py +22 -6
- supervisely/app/widgets/tabs/template.html +5 -1
- supervisely/nn/artifacts/__init__.py +1 -1
- supervisely/nn/artifacts/artifacts.py +10 -2
- supervisely/nn/artifacts/detectron2.py +1 -0
- supervisely/nn/artifacts/hrda.py +1 -0
- supervisely/nn/artifacts/mmclassification.py +20 -0
- supervisely/nn/artifacts/mmdetection.py +5 -3
- supervisely/nn/artifacts/mmsegmentation.py +1 -0
- supervisely/nn/artifacts/ritm.py +1 -0
- supervisely/nn/artifacts/rtdetr.py +1 -0
- supervisely/nn/artifacts/unet.py +1 -0
- supervisely/nn/artifacts/utils.py +3 -0
- supervisely/nn/artifacts/yolov5.py +2 -0
- supervisely/nn/artifacts/yolov8.py +1 -0
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +18 -18
- supervisely/nn/experiments.py +9 -0
- supervisely/nn/inference/gui/serving_gui_template.py +39 -13
- supervisely/nn/inference/inference.py +160 -94
- supervisely/nn/inference/predict_app/__init__.py +0 -0
- supervisely/nn/inference/predict_app/gui/__init__.py +0 -0
- supervisely/nn/inference/predict_app/gui/classes_selector.py +91 -0
- supervisely/nn/inference/predict_app/gui/gui.py +710 -0
- supervisely/nn/inference/predict_app/gui/input_selector.py +165 -0
- supervisely/nn/inference/predict_app/gui/model_selector.py +79 -0
- supervisely/nn/inference/predict_app/gui/output_selector.py +139 -0
- supervisely/nn/inference/predict_app/gui/preview.py +93 -0
- supervisely/nn/inference/predict_app/gui/settings_selector.py +184 -0
- supervisely/nn/inference/predict_app/gui/tags_selector.py +110 -0
- supervisely/nn/inference/predict_app/gui/utils.py +282 -0
- supervisely/nn/inference/predict_app/predict_app.py +184 -0
- supervisely/nn/inference/uploader.py +9 -5
- supervisely/nn/model/prediction.py +2 -0
- supervisely/nn/model/prediction_session.py +20 -3
- supervisely/nn/training/gui/gui.py +131 -44
- supervisely/nn/training/gui/model_selector.py +8 -6
- supervisely/nn/training/gui/train_val_splits_selector.py +122 -70
- supervisely/nn/training/gui/training_artifacts.py +0 -5
- supervisely/nn/training/train_app.py +161 -44
- supervisely/project/project.py +211 -73
- supervisely/template/experiment/experiment.html.jinja +74 -17
- supervisely/template/experiment/experiment_generator.py +258 -112
- supervisely/template/experiment/header.html.jinja +31 -13
- supervisely/template/experiment/sly-style.css +7 -2
- {supervisely-6.73.419.dist-info → supervisely-6.73.421.dist-info}/METADATA +3 -1
- {supervisely-6.73.419.dist-info → supervisely-6.73.421.dist-info}/RECORD +75 -57
- supervisely/app/widgets/experiment_selector/style.css +0 -27
- supervisely/app/widgets/experiment_selector/template.html +0 -61
- {supervisely-6.73.419.dist-info → supervisely-6.73.421.dist-info}/LICENSE +0 -0
- {supervisely-6.73.419.dist-info → supervisely-6.73.421.dist-info}/WHEEL +0 -0
- {supervisely-6.73.419.dist-info → supervisely-6.73.421.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.419.dist-info → supervisely-6.73.421.dist-info}/top_level.txt +0 -0
supervisely/template/experiment/experiment_generator.py

```diff
@@ -1,16 +1,32 @@
 from __future__ import annotations
 
 import os
+import math
 from datetime import datetime
+import json
 from pathlib import Path
-from typing import Dict, Literal, Optional, Tuple
+from typing import Dict, Literal, Optional, Tuple, List
+from urllib.parse import urlencode
 
 import supervisely.io.env as sly_env
 import supervisely.io.fs as sly_fs
 import supervisely.io.json as sly_json
-from supervisely import logger
+from supervisely import logger, ProjectInfo
 from supervisely.api.api import Api
 from supervisely.api.file_api import FileInfo
+from supervisely.geometry.any_geometry import AnyGeometry
+from supervisely.geometry.bitmap import Bitmap
+from supervisely.geometry.cuboid import Cuboid
+from supervisely.geometry.cuboid_3d import Cuboid3d
+from supervisely.geometry.graph import GraphNodes
+from supervisely.geometry.multichannel_bitmap import MultichannelBitmap
+from supervisely.geometry.point import Point
+from supervisely.geometry.point_3d import Point3d
+from supervisely.geometry.pointcloud import Pointcloud
+from supervisely.geometry.polygon import Polygon
+from supervisely.geometry.polyline import Polyline
+from supervisely.geometry.rectangle import Rectangle
+from supervisely.imaging.color import rgb2hex
 from supervisely.nn.benchmark.object_detection.metric_provider import (
     METRIC_NAMES as OBJECT_DETECTION_METRIC_NAMES,
 )
@@ -23,21 +39,16 @@ from supervisely.nn.utils import RuntimeType
 from supervisely.project import ProjectMeta
 from supervisely.template.base_generator import BaseGenerator
 
-
-from
-
-from
-
-
-
-
-
-
-from supervisely.geometry.polygon import Polygon
-from supervisely.geometry.rectangle import Rectangle
-
-from supervisely.imaging.color import rgb2hex
-
+try:
+    from tbparse import SummaryReader  # pylint: disable=import-error
+    import plotly.express as px  # pylint: disable=import-error
+    from plotly.subplots import make_subplots  # pylint: disable=import-error
+    import plotly.graph_objects as go  # pylint: disable=import-error
+except Exception as _:
+    SummaryReader = None  # type: ignore
+    px = None  # type: ignore
+    make_subplots = None  # type: ignore
+    go = None  # type: ignore
 
 # @TODO: Partly supports unreleased apps
 class ExperimentGenerator(BaseGenerator):
@@ -79,6 +90,12 @@ class ExperimentGenerator(BaseGenerator):
     def _report_url(self, server_address: str, template_id: int) -> str:
         return f"{server_address}/nn/experiments/{template_id}"
 
+    def _datasets_url_with_entities_filter(self, project_id: int, entities_filter: List[dict]) -> str:
+        base_url = self.api.server_address.rstrip('/')
+        path = f"/projects/{project_id}/datasets"
+        query = urlencode({"entitiesFilter": json.dumps(entities_filter)})
+        return f"{base_url}{path}?{query}"
+
     def upload_to_artifacts(self):
         remote_dir = os.path.join(self.info["artifacts_dir"], "visualization")
         self.upload(remote_dir, team_id=self.team_id)
```
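As an aside, the sketch below (not SDK code) mirrors what the new `_datasets_url_with_entities_filter` helper produces: a datasets URL carrying a JSON-encoded `entitiesFilter` query parameter. The server address, project id, and collection id are placeholder values; the filter payload follows the shape used for the train/val collections later in this diff.

```python
# Standalone sketch, not part of the SDK: mirrors the logic of the new
# _datasets_url_with_entities_filter helper. All concrete values are placeholders.
import json
from urllib.parse import urlencode

server_address = "https://app.supervisely.com"  # hypothetical instance URL
project_id = 123                                # hypothetical project id
entities_filter = [
    {
        "type": "entities_collection",
        "data": {"collectionId": 456, "include": True},  # hypothetical collection id
    }
]

# Encode the filter exactly as the helper does: JSON-dump, then URL-encode.
query = urlencode({"entitiesFilter": json.dumps(entities_filter)})
url = f"{server_address.rstrip('/')}/projects/{project_id}/datasets?{query}"
print(url)
# e.g. https://app.supervisely.com/projects/123/datasets?entitiesFilter=%5B%7B...%7D%5D
```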
```diff
@@ -87,9 +104,7 @@ class ExperimentGenerator(BaseGenerator):
         remote_report_path = os.path.join(
             self.info["artifacts_dir"], "visualization", "template.vue"
         )
-        experiment_report = self.api.file.get_info_by_path(
-            self.team_id, remote_report_path
-        )
+        experiment_report = self.api.file.get_info_by_path(self.team_id, remote_report_path)
         if experiment_report is None:
             raise ValueError("Generate and upload report first")
         return experiment_report
@@ -107,7 +122,7 @@ class ExperimentGenerator(BaseGenerator):
         context = {
             "env": self._get_env_context(),
             "experiment": self._get_experiment_context(),
-            "resources": self.
+            "resources": self._get_resources_context(),
             "code": self._get_code_context(),
             "widgets": self._get_widgets_context(),
         }
@@ -119,18 +134,32 @@ class ExperimentGenerator(BaseGenerator):
 
     def _get_apps_context(self):
         train_app, serve_app = self._get_app_train_serve_app_info()
-        apply_images_app, apply_videos_app = self._get_app_apply_nn_app_info()
         log_viewer_app = self._get_log_viewer_app_info()
+        apply_images_app, apply_videos_app = self._get_app_apply_nn_app_info()
+        predict_app = self._get_predict_app_info()
         return {
             "train": train_app,
             "serve": serve_app,
             "log_viewer": log_viewer_app,
             "apply_nn_to_images": apply_images_app,
             "apply_nn_to_videos": apply_videos_app,
+            "predict": predict_app,
         }
 
-    def
-
+    def _get_original_repository_info(self):
+        original_repository = self.app_options.get("original_repository", None)
+        if original_repository is None:
+            return None
+        original_repository_info = {
+            "name": original_repository.get("name", None),
+            "url": original_repository.get("url", None),
+        }
+        return original_repository_info
+
+    def _get_resources_context(self):
+        apps = self._get_apps_context()
+        original_repository = self._get_original_repository_info()
+        return {"apps": apps, "original_repository": original_repository}
 
     def _get_code_context(self):
         docker_image = self._get_docker_image()
@@ -141,12 +170,8 @@ class ExperimentGenerator(BaseGenerator):
             "docker": {"image": docker_image, "deploy": f"{docker_image}-deploy"},
             "local_prediction": {
                 "repo": repo_info,
-                "serving_module": (
-                    self.serving_class.__module__ if self.serving_class else None
-                ),
-                "serving_class": (
-                    self.serving_class.__name__ if self.serving_class else None
-                ),
+                "serving_module": (self.serving_class.__module__ if self.serving_class else None),
+                "serving_class": (self.serving_class.__name__ if self.serving_class else None),
             },
             "demo": {
                 "pytorch": pytorch_demo,
@@ -160,7 +185,7 @@ class ExperimentGenerator(BaseGenerator):
         metrics_table = self._generate_metrics_table(self.info["task_type"])
         sample_gallery = self._get_sample_predictions_gallery()
         classes_table = self._generate_classes_table()
-
+        training_plots = self._generate_training_plots()
         return {
             "tables": {
                 "checkpoints": checkpoints_table,
@@ -168,6 +193,7 @@ class ExperimentGenerator(BaseGenerator):
                 "classes": classes_table,
             },
             "sample_pred_gallery": sample_gallery,
+            "training_plots": training_plots,
         }
 
     # --------------------------------------------------------------------------- #
@@ -186,10 +212,7 @@ class ExperimentGenerator(BaseGenerator):
         html.append("<thead><tr><th>Metrics</th><th>Value</th></tr></thead>")
         html.append("<tbody>")
 
-        if (
-            task_type == TaskType.OBJECT_DETECTION
-            or task_type == TaskType.INSTANCE_SEGMENTATION
-        ):
+        if task_type == TaskType.OBJECT_DETECTION or task_type == TaskType.INSTANCE_SEGMENTATION:
             metric_names = OBJECT_DETECTION_METRIC_NAMES
         elif task_type == TaskType.SEMANTIC_SEGMENTATION:
             metric_names = SEMANTIC_SEGMENTATION_METRIC_NAMES
@@ -313,9 +336,7 @@ class ExperimentGenerator(BaseGenerator):
         primary_metric_value = eval_metrics.get(primary_metric_name)
 
         if primary_metric_name is None or primary_metric_value is None:
-            logger.debug(
-                f"Primary metric is not found in evaluation metrics: {eval_metrics}"
-            )
+            logger.debug(f"Primary metric is not found in evaluation metrics: {eval_metrics}")
             return primary_metric
 
         primary_metric = {
@@ -339,10 +360,7 @@ class ExperimentGenerator(BaseGenerator):
             return display_metrics
 
         main_metrics = []
-        if (
-            task_type == TaskType.OBJECT_DETECTION
-            or task_type == TaskType.INSTANCE_SEGMENTATION
-        ):
+        if task_type == TaskType.OBJECT_DETECTION or task_type == TaskType.INSTANCE_SEGMENTATION:
             main_metrics = ["mAP", "AP75", "AP50", "precision", "recall"]
         elif task_type == TaskType.SEMANTIC_SEGMENTATION:
             main_metrics = ["mIoU", "mPixel", "mPrecision", "mRecall", "mF1"]
@@ -384,29 +402,20 @@ class ExperimentGenerator(BaseGenerator):
         if trt_checkpoint is not None:
             checkpoints.append(trt_checkpoint)
 
-        checkpoint_paths = [
-            os.path.join(self.artifacts_dir, ckpt) for ckpt in checkpoints
-        ]
+        checkpoint_paths = [os.path.join(self.artifacts_dir, ckpt) for ckpt in checkpoints]
         checkpoint_infos = [
-            self.api.file.get_info_by_path(self.team_id, path)
-            for path in checkpoint_paths
-        ]
-        checkpoint_sizes = [
-            f"{info.sizeb / 1024 / 1024:.2f} MB" for info in checkpoint_infos
+            self.api.file.get_info_by_path(self.team_id, path) for path in checkpoint_paths
         ]
+        checkpoint_sizes = [f"{info.sizeb / 1024 / 1024:.2f} MB" for info in checkpoint_infos]
         checkpoint_dl_links = [
             f"<a href='{info.full_storage_url}' download='{sly_fs.get_file_name_with_ext(info.path)}'>Download</a>"
             for info in checkpoint_infos
         ]
 
         html = ['<table class="table">']
-        html.append(
-            "<thead><tr><th>Checkpoint</th><th>Size</th><th> </th></tr></thead>"
-        )
+        html.append("<thead><tr><th>Checkpoint</th><th>Size</th><th> </th></tr></thead>")
         html.append("<tbody>")
-        for checkpoint, size, dl_link in zip(
-            checkpoints, checkpoint_sizes, checkpoint_dl_links
-        ):
+        for checkpoint, size, dl_link in zip(checkpoints, checkpoint_sizes, checkpoint_dl_links):
             if isinstance(checkpoint, str):
                 html.append(
                     f"<tr><td>{os.path.basename(checkpoint)}</td><td>{size}</td><td>{dl_link}</td></tr>"
@@ -517,9 +526,7 @@ class ExperimentGenerator(BaseGenerator):
         best_checkpoint_path = os.path.join(
             self.artifacts_dir, "checkpoints", self.info["best_checkpoint"]
         )
-        best_checkpoint_info = self.api.file.get_info_by_path(
-            self.team_id, best_checkpoint_path
-        )
+        best_checkpoint_info = self.api.file.get_info_by_path(self.team_id, best_checkpoint_path)
         best_checkpoint = {
             "name": self.info["best_checkpoint"],
             "path": best_checkpoint_path,
@@ -560,9 +567,7 @@ class ExperimentGenerator(BaseGenerator):
                 self.team_id,
                 os.path.join(os.path.dirname(onnx_checkpoint_path), "classes.json"),
             )
-            onnx_file_info = self.api.file.get_info_by_path(
-                self.team_id, onnx_checkpoint_path
-            )
+            onnx_file_info = self.api.file.get_info_by_path(self.team_id, onnx_checkpoint_path)
             onnx_checkpoint_data = {
                 "name": os.path.basename(export.get(RuntimeType.ONNXRUNTIME)),
                 "path": onnx_checkpoint_path,
@@ -572,16 +577,12 @@ class ExperimentGenerator(BaseGenerator):
             }
         trt_checkpoint = export.get(RuntimeType.TENSORRT)
         if trt_checkpoint is not None:
-            trt_checkpoint_path = os.path.join(
-                self.artifacts_dir, export.get(RuntimeType.TENSORRT)
-            )
+            trt_checkpoint_path = os.path.join(self.artifacts_dir, export.get(RuntimeType.TENSORRT))
             classes_file = self.api.file.get_info_by_path(
                 self.team_id,
                 os.path.join(os.path.dirname(trt_checkpoint_path), "classes.json"),
             )
-            trt_file_info = self.api.file.get_info_by_path(
-                self.team_id, trt_checkpoint_path
-            )
+            trt_file_info = self.api.file.get_info_by_path(self.team_id, trt_checkpoint_path)
             trt_checkpoint_data = {
                 "name": os.path.basename(export.get(RuntimeType.TENSORRT)),
                 "path": trt_checkpoint_path,
@@ -638,9 +639,7 @@ class ExperimentGenerator(BaseGenerator):
         root_dir = current_dir
 
         while root_dir.parent != root_dir:
-            config_path = (
-                root_dir / "supervisely_integration" / "train" / "config.json"
-            )
+            config_path = root_dir / "supervisely_integration" / "train" / "config.json"
             if config_path.exists():
                 break
             root_dir = root_dir.parent
@@ -748,9 +747,7 @@ class ExperimentGenerator(BaseGenerator):
         :rtype: Tuple[str, str]
         """
 
-        def find_app_by_framework(
-            api: Api, framework: str, action: Literal["train", "serve"]
-        ):
+        def find_app_by_framework(api: Api, framework: str, action: Literal["train", "serve"]):
             try:
                 modules = api.app.get_list_ecosystem_modules(
                     categories=[action, f"framework:{framework}"],
@@ -763,26 +760,18 @@ class ExperimentGenerator(BaseGenerator):
                 logger.warning(f"Failed to find {action} app by framework: {e}")
                 return None
 
-        train_app_info = find_app_by_framework(
-            self.api, self.info["framework_name"], "train"
-        )
-        serve_app_info = find_app_by_framework(
-            self.api, self.info["framework_name"], "serve"
-        )
+        train_app_info = find_app_by_framework(self.api, self.info["framework_name"], "train")
+        serve_app_info = find_app_by_framework(self.api, self.info["framework_name"], "serve")
 
         if train_app_info is not None:
-            train_app_slug = train_app_info["slug"].replace(
-                "supervisely-ecosystem/", ""
-            )
+            train_app_slug = train_app_info["slug"].replace("supervisely-ecosystem/", "")
             train_app_id = train_app_info["id"]
         else:
             train_app_slug = None
             train_app_id = None
 
         if serve_app_info is not None:
-            serve_app_slug = serve_app_info["slug"].replace(
-                "supervisely-ecosystem/", ""
-            )
+            serve_app_slug = serve_app_info["slug"].replace("supervisely-ecosystem/", "")
             serve_app_id = serve_app_info["id"]
         else:
             serve_app_slug = None
@@ -814,9 +803,7 @@ class ExperimentGenerator(BaseGenerator):
         if task_info is not None:
             agent_info["name"] = task_info["agentName"]
             agent_info["id"] = task_info["agentId"]
-            agent_info["link"] = (
-                f"{self.api.server_address}/nodes/{agent_info['id']}/info"
-            )
+            agent_info["link"] = f"{self.api.server_address}/nodes/{agent_info['id']}/info"
         return agent_info
 
     def _get_class_names(self, model_classes: list) -> dict:
@@ -839,51 +826,111 @@ class ExperimentGenerator(BaseGenerator):
             ),
         }
 
+    def _get_predict_app_info(self):
+        """
+        Get predict app info.
+
+        :returns: Predict app info
+        :rtype: dict
+        """
+        predict_app_slug = "supervisely-ecosystem/apply-nn"
+        predict_app_module_id = self.api.app.get_ecosystem_module_id(predict_app_slug)
+        predict_app = {"slug": predict_app_slug, "module_id": predict_app_module_id}
+        return predict_app
+
     def _get_app_apply_nn_app_info(self):
-        """
+        """
+        Get apply NN app info.
 
         :returns: Apply NN app info
         :rtype: dict
         """
-
+        # Images
         apply_nn_images_slug = "nn-image-labeling/project-dataset"
         apply_nn_images_module_id = self.api.app.get_ecosystem_module_id(
             f"supervisely-ecosystem/{apply_nn_images_slug}"
         )
+        apply_nn_images_app = {"slug": apply_nn_images_slug, "module_id": apply_nn_images_module_id}
+
+        # Videos
         apply_nn_videos_slug = "apply-nn-to-videos-project"
         apply_nn_videos_module_id = self.api.app.get_ecosystem_module_id(
             f"supervisely-ecosystem/{apply_nn_videos_slug}"
        )
+        apply_nn_videos_app = {"slug": apply_nn_videos_slug, "module_id": apply_nn_videos_module_id}
+        return apply_nn_images_app, apply_nn_videos_app
 
-
-
-
-
-
-            "
-            "
+    def _get_project_splits(self):
+        train_collection_id = self.info.get("train_collection_id", None)
+        if train_collection_id is not None:
+            train_collection_info = self.api.entities_collection.get_info_by_id(train_collection_id)
+            train_collection_name = train_collection_info.name
+            train_collection_url = self._datasets_url_with_entities_filter(self.info["project_id"], [{"type": "entities_collection", "data": {"collectionId": train_collection_id, "include": True}}])
+            train_size = self.info.get("train_size", "N/A")
+        else:
+            train_collection_name = None
+            train_size = None
+            train_collection_url = None
+
+        val_collection_id = self.info.get("val_collection_id", None)
+        if val_collection_id is not None:
+            val_collection_info = self.api.entities_collection.get_info_by_id(val_collection_id)
+            val_collection_name = val_collection_info.name
+            val_collection_url = self._datasets_url_with_entities_filter(self.info["project_id"], [{"type": "entities_collection", "data": {"collectionId": val_collection_id, "include": True}}])
+            val_size = self.info.get("val_size", "N/A")
+        else:
+            val_collection_name = None
+            val_size = None
+            val_collection_url = None
+
+        splits = {
+            "train": {
+                "name": train_collection_name,
+                "size": train_size,
+                "url": train_collection_url,
+            },
+            "val": {
+                "name": val_collection_name,
+                "size": val_size,
+                "url": val_collection_url,
+            },
         }
-        return
+        return splits
+
+    def _get_project_version(self, project_info: ProjectInfo):
+        project_version = project_info.version
+        if project_version is None:
+            version_info = {
+                "version": "N/A",
+                "id": None,
+                "url": None,
+            }
+        else:
+            version_info = {
+                "version": project_version["version"],
+                "id": project_version["id"],
+                "url": f"{self.api.server_address}/projects/{project_info.id}/versions",
+            }
+        return version_info
 
     def _get_project_context(self):
         project_id = self.info["project_id"]
         project_info = self.api.project.get_info_by_id(project_id)
+        project_version = self._get_project_version(project_info)
         project_type = project_info.type
         project_url = f"{self.api.server_address}/projects/{project_id}/datasets"
-        project_train_size = self.info.get("train_size", "N/A")
-        project_val_size = self.info.get("val_size", "N/A")
         model_classes = [cls.name for cls in self.model_meta.obj_classes]
         class_names = self._get_class_names(model_classes)
+        splits = self._get_project_splits()
 
         project_context = {
             "id": project_id,
+            "workspace_id": project_info.workspace_id if project_info else None,
             "name": project_info.name if project_info else "Project was archived",
-            "
-            "
-            "
-
-                "val": project_val_size,
-            },
+            "version": project_version,
+            "url": project_url if project_info else None,
+            "type": project_type if project_info else None,
+            "splits": splits,
             "classes": {
                 "count": len(model_classes),
                 "names": class_names,
```
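For reference, a rough sketch of the context fragment the new `_get_project_splits` and `_get_project_version` helpers feed into the experiment template; all names, sizes, and URLs below are illustrative placeholders rather than real output.

```python
# Illustrative shape only: the "splits" and "version" entries added to the
# project context above. Values are placeholders.
splits = {
    "train": {"name": "train collection", "size": 800, "url": "<datasets URL filtered by train collection>"},
    "val": {"name": "val collection", "size": 200, "url": "<datasets URL filtered by val collection>"},
}
version = {"version": 3, "id": 42, "url": "<server_address>/projects/<project_id>/versions"}
```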
```diff
@@ -897,10 +944,14 @@ class ExperimentGenerator(BaseGenerator):
         base_checkpoint_path = None
         if base_checkpoint_link is not None:
             if base_checkpoint_link.startswith("/experiments/"):
-                base_checkpoint_info = self.api.file.get_info_by_path(self.team_id, base_checkpoint_link)
+                base_checkpoint_info = self.api.file.get_info_by_path(
+                    self.team_id, base_checkpoint_link
+                )
                 base_checkpoint_name = base_checkpoint_info.name
                 base_checkpoint_link = base_checkpoint_info.full_storage_url
-                base_checkpoint_path = f"{self.api.server_address}/files/?path={base_checkpoint_info.path}"
+                base_checkpoint_path = (
+                    f"{self.api.server_address}/files/?path={base_checkpoint_info.path}"
+                )
 
         base_checkpoint = {
             "name": base_checkpoint_name,
@@ -930,9 +981,7 @@ class ExperimentGenerator(BaseGenerator):
         onnx_checkpoint, trt_checkpoint = self._get_optimized_checkpoints()
 
         logs_path = self.info.get("logs", {}).get("link")
-        logs_url = (
-            f"{self.api.server_address}/files/?path={logs_path}" if logs_path else None
-        )
+        logs_url = f"{self.api.server_address}/files/?path={logs_path}" if logs_path else None
 
         primary_metric = self._get_primary_metric()
         display_metrics = self._get_display_metrics(self.info["task_type"])
@@ -994,3 +1043,100 @@ class ExperimentGenerator(BaseGenerator):
             },
         }
         return experiment_context
+
+    def _generate_training_plots(self) -> Optional[str]:
+        # pip install tbparse plotly kaleido
+        if SummaryReader is None or px is None:
+            logger.warning("tbparse or plotly is not installed – skipping training plots generation")
+            return None
+
+        logs_path = self.info.get("logs", {}).get("link")
+        if logs_path is None:
+            return None
+
+        events_files: List[str] = []
+        remote_log_files = self.api.file.list(self.team_id, logs_path, return_type="fileinfo")
+        try:
+            for f in remote_log_files:
+                if f.name.startswith("events.out.tfevents"):
+                    events_files.append(f.path)
+        except Exception as e:
+            logger.warning(f"Failed to get training logs: {e}")
+            return None
+
+        if len(events_files) == 0:
+            return None
+
+        tmp_logs_dir = os.path.join(self.output_dir, "logs_tmp")
+        sly_fs.mkdir(tmp_logs_dir, True)
+        local_event_path = os.path.join(tmp_logs_dir, os.path.basename(events_files[0]))
+        try:
+            self.api.file.download(self.team_id, events_files[0], local_event_path)
+        except Exception as e:
+            logger.warning(f"Failed to download training log: {e}")
+            return None
+
+        try:
+            reader = SummaryReader(local_event_path)
+            scalars_df = reader.scalars
+        except Exception as e:
+            logger.warning(f"Failed to read training log: {e}")
+            return None
+
+        if scalars_df is None or scalars_df.empty:
+            return None
+
+        tags_to_plot = scalars_df["tag"].unique().tolist()[:12]
+        df_plot = scalars_df[scalars_df["tag"].isin(tags_to_plot)]
+
+        try:
+            data_dir = os.path.join(self.output_dir, "data")
+            if not sly_fs.dir_exists(data_dir):
+                sly_fs.mkdir(data_dir, True)
+
+            n_tags = len(tags_to_plot)
+            side = min(4, max(2, math.ceil(math.sqrt(n_tags))))
+            cols = side
+            rows = math.ceil(n_tags / cols)
+            fig = make_subplots(rows=rows, cols=cols, subplot_titles=tags_to_plot)
+
+            for idx, tag in enumerate(tags_to_plot, start=1):
+                tag_df = df_plot[df_plot["tag"] == tag]
+                if tag_df.empty:
+                    continue
+                row = (idx - 1) // cols + 1
+                col = (idx - 1) % cols + 1
+                fig.add_trace(
+                    go.Scatter(
+                        x=tag_df["step"],
+                        y=tag_df["value"],
+                        mode="lines",
+                        name=tag,
+                        showlegend=False,
+                    ),
+                    row=row,
+                    col=col,
+                )
+
+                if tag.startswith("lr"):
+                    fig.update_yaxes(tickformat=".0e", row=row, col=col)
+
+            fig.update_layout(
+                height=300 * rows,
+                width=400 * cols,
+                showlegend=False,
+            )
+
+            local_img_path = os.path.join(data_dir, "training_plots_grid.png")
+            fig.write_image(local_img_path, engine="kaleido")
+            sly_fs.remove_dir(tmp_logs_dir)
+
+            img_widget = f"<sly-iw-image src=\"/data/training_plots_grid.png\" :template-base-path=\"templateBasePath\" :options=\"{{ style: {{ width: '70%', height: 'auto' }} }}\" />"
+            return img_widget
+        except Exception as e:
+            logger.warning(f"Failed to build or save static training plot: {e}")
+            return None
+
+
+# Pylint Errors: ************* Module supervisely.api.app_api
+# supervisely/api/app_api.py:1463:20: E0606: Possibly using variable 'progress' before assignment (possibly-used-before-assignment)
```