supervisely 6.73.369__py3-none-any.whl → 6.73.371__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/api/pointcloud/pointcloud_api.py +12 -6
- supervisely/app/widgets/experiment_selector/experiment_selector.py +2 -2
- supervisely/nn/inference/cache.py +5 -2
- supervisely/nn/inference/inference.py +17 -8
- supervisely/nn/inference/interactive_segmentation/functional.py +6 -0
- supervisely/nn/training/gui/gui.py +6 -3
- supervisely/nn/training/train_app.py +102 -14
- {supervisely-6.73.369.dist-info → supervisely-6.73.371.dist-info}/METADATA +1 -1
- {supervisely-6.73.369.dist-info → supervisely-6.73.371.dist-info}/RECORD +13 -13
- {supervisely-6.73.369.dist-info → supervisely-6.73.371.dist-info}/LICENSE +0 -0
- {supervisely-6.73.369.dist-info → supervisely-6.73.371.dist-info}/WHEEL +0 -0
- {supervisely-6.73.369.dist-info → supervisely-6.73.371.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.369.dist-info → supervisely-6.73.371.dist-info}/top_level.txt +0 -0

supervisely/api/pointcloud/pointcloud_api.py

@@ -36,6 +36,7 @@ from supervisely.io.fs import (
 )
 from supervisely.pointcloud.pointcloud import is_valid_format
 from supervisely.sly_logger import logger
+from supervisely.imaging import image as sly_image


 class PointcloudInfo(NamedTuple):

@@ -420,7 +421,7 @@ class PointcloudApi(RemoveableBulkModuleApi):
             convert_json_info_cb=lambda x: x,
         )

-    def download_related_image(self, id: int, path: str) -> Response:
+    def download_related_image(self, id: int, path: str = None) -> Response:
         """
         Download a related context image from Supervisely to local directory by image id.

@@ -454,11 +455,16 @@ class PointcloudApi(RemoveableBulkModuleApi):
             {ApiField.ID: id},
             stream=True,
         )
-        ensure_base_path(path)
-        with open(path, "wb") as fd:
-            for chunk in response.iter_content(chunk_size=1024 * 1024):
-                fd.write(chunk)
-        return response
+
+        if path:
+            ensure_base_path(path)
+            with open(path, "wb") as fd:
+                for chunk in response.iter_content(chunk_size=1024 * 1024):
+                    fd.write(chunk)
+            return response
+        else:
+            related_image = sly_image.read_bytes(response.content, False)
+            return related_image

     # @TODO: copypaste from video_api
     def upload_hash(
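
With this change, `download_related_image` keeps its previous behavior when a `path` is given (the image is streamed to disk and the raw `Response` is returned) and decodes the image in memory when `path` is omitted. A minimal usage sketch, assuming environment credentials and a hypothetical related-image id:

```python
import supervisely as sly

api = sly.Api.from_env()
related_image_id = 19618654  # hypothetical related-image id

# Old-style call: stream the image to a local file and get the HTTP response back.
response = api.pointcloud.download_related_image(related_image_id, path="/tmp/related.jpg")
print(response.status_code)

# New-style call: no path, so the image is decoded in memory (numpy array).
img = api.pointcloud.download_related_image(related_image_id)
print(img.shape)  # e.g. (height, width, 3)
```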

supervisely/app/widgets/experiment_selector/experiment_selector.py

@@ -52,7 +52,7 @@ class ExperimentSelector(Widget):
            self._experiment_info = experiment_info

            task_id = experiment_info.task_id
-           if task_id == "debug-session":
+           if task_id == "debug-session" or task_id == -1:
                pass
            elif type(task_id) is str:
                if task_id.isdigit():

@@ -392,7 +392,7 @@ class ExperimentSelector(Widget):
        if result:
            task_type, model_row = result
            if task_type is not None and model_row is not None:
-               if model_row.task_id == "debug-session":
+               if model_row.task_id == "debug-session" or model_row.task_id == -1:
                    self.__debug_row = (task_type, model_row)
                    continue
                table_rows[task_type].append(model_row)

supervisely/nn/inference/cache.py

@@ -233,14 +233,17 @@ class InferenceImageCache:
         with self._lock:
             self._cache.clear(False)

-    def download_image(self, api: sly.Api, image_id: int):
+    def download_image(self, api: sly.Api, image_id: int, related: bool = False):
         name = self._image_name(image_id)
         self._wait_if_in_queue(name, api.logger)

         if name not in self._cache:
             self._load_queue.set(name, image_id)
             api.logger.debug(f"Add image #{image_id} to cache")
-            img = api.image.download_np(image_id)
+            if not related:
+                img = api.image.download_np(image_id)
+            else:
+                img = api.pointcloud.download_related_image(image_id)
             self._add_to_cache(name, img)
             return img

supervisely/nn/inference/inference.py

@@ -878,10 +878,16 @@ class Inference:

        try:
            if is_production():
-               self._add_workflow_input(model_source, model_files, model_info)
+               without_workflow = deploy_params.get("without_workflow", False)
+               if without_workflow is False:
+                   self._add_workflow_input(model_source, model_files, model_info)
        except Exception as e:
            logger.warning(f"Failed to add input to the workflow: {repr(e)}")

+       # remove is_benchmark from deploy_params
+       if "without_workflow" in deploy_params:
+           deploy_params.pop("without_workflow")
+
        self._load_model(deploy_params)
        if self._model_meta is None:
            self._set_model_meta_from_classes()

@@ -3709,17 +3715,20 @@ class Inference:
        )

        app_name = sly_env.app_name()
-       meta = WorkflowMeta(node_settings=WorkflowSettings(title=
+       meta = WorkflowMeta(node_settings=WorkflowSettings(title=app_name))

        logger.debug(
            f"Workflow Input: Checkpoint URL - {checkpoint_url}, Checkpoint Name - {checkpoint_name}"
        )
-       if
-       self.api.
-
-
-
-
+       if model_source == ModelSource.CUSTOM:
+           if checkpoint_url and self.api.file.exists(sly_env.team_id(), checkpoint_url):
+               # self.api.app.workflow.add_input_file(checkpoint_url, model_weight=True, meta=meta)
+               remote_checkpoint_dir = os.path.dirname(checkpoint_url)
+               self.api.app.workflow.add_input_folder(remote_checkpoint_dir, meta=meta)
+           else:
+               logger.debug(
+                   f"Checkpoint {checkpoint_url} not found in Team Files. Cannot set workflow input"
+               )


    def _exclude_duplicated_predictions(
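
Together with the training-app change further below (`self._benchmark_params["without_workflow"] = True`), the new `without_workflow` key lets a caller suppress workflow-input registration for benchmark deploys; the key is then stripped before the parameters reach `_load_model`. A standalone sketch of that flow with hypothetical values, outside the `Inference` class:

```python
# Standalone illustration of the "without_workflow" handling above; values are hypothetical.
deploy_params = {
    "model_source": "Custom models",
    "model_files": {"checkpoint": "best.pt"},
    "without_workflow": True,  # set by TrainApp when deploying a model for benchmarking
}

# Workflow input is only registered when the flag is absent or False.
if deploy_params.get("without_workflow", False) is False:
    print("would register workflow input here")

# The flag is removed so the model loader receives only real deploy parameters.
if "without_workflow" in deploy_params:
    deploy_params.pop("without_workflow")

print(deploy_params)
```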

supervisely/nn/inference/interactive_segmentation/functional.py

@@ -52,6 +52,10 @@ def download_image_from_context(
        return api.video.frame.download_np(
            context["video"]["video_id"], context["video"]["frame_index"]
        )
+   elif "pcd_related_image_id" in context:
+       if cache_load_img is not None:
+           return cache_load_img(api, context["pcd_related_image_id"], related=True)
+       return api.pointcloud.download_related_image(context["pcd_related_image_id"])
    else:
        raise Exception("Project type is not supported")

@@ -110,6 +114,8 @@ def get_hash_from_context(context: dict):
        return "_".join(map(str, [volume_id, slice_index, plane, window_center, window_width]))
    elif "video" in context:
        return "_".join(map(str, [context["video"]["video_id"], context["video"]["frame_index"]]))
+   elif "pcd_related_image_id" in context:
+       return str(context["pcd_related_image_id"])
    else:
        raise Exception("Project type is not supported")
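
Interactive segmentation requests can now reference a point cloud's related (context) photo through a `pcd_related_image_id` key, and the cache hash for such a request is simply the id itself. A small sketch of the new context shape, assuming environment credentials and a hypothetical id:

```python
import supervisely as sly
from supervisely.nn.inference.interactive_segmentation import functional

# Hypothetical request context pointing at a point cloud's related (context) image.
context = {"pcd_related_image_id": 19618654}

# The cache key for such a request is just the id.
print(functional.get_hash_from_context(context))  # -> "19618654"

# Without a cache callback, the image itself is fetched through the new API path.
api = sly.Api.from_env()
img = api.pointcloud.download_related_image(context["pcd_related_image_id"])
```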

supervisely/nn/training/gui/gui.py

@@ -242,7 +242,7 @@ class TrainGUI:
        else:
            self.task_id = sly_env.task_id(raise_not_found=False)
            if self.task_id is None:
-               self.task_id =
+               self.task_id = -1

        self.framework_name = framework_name
        self.models = models

@@ -257,7 +257,7 @@ class TrainGUI:
        self.project_info = self._api.project.get_info_by_id(self.project_id)
        if self.project_info.type is None:
            raise ValueError(f"Project with ID: '{self.project_id}' does not exist or was archived")
-
+
        self.project_meta = ProjectMeta.from_json(self._api.project.get_meta(self.project_id))

        if self.workspace_id is None:

@@ -350,7 +350,10 @@ class TrainGUI:
        if model_name is None:
            experiment_name = "Enter experiment name"
        else:
-           experiment_name = f"{self.task_id}_{self.project_info.name}_{model_name}"
+           if self.task_id == -1:
+               experiment_name = f"debug_{self.project_info.name}_{model_name}"
+           else:
+               experiment_name = f"{self.task_id}_{self.project_info.name}_{model_name}"

        if experiment_name == self.training_process.get_experiment_name():
            return

supervisely/nn/training/train_app.py

@@ -9,7 +9,7 @@ import shutil
 import subprocess
 from datetime import datetime
 from os import getcwd, listdir, walk
-from os.path import basename, exists, expanduser, isdir, isfile, join
+from os.path import basename, dirname, exists, expanduser, isdir, isfile, join
 from typing import Any, Dict, List, Literal, Optional, Union
 from urllib.request import urlopen


@@ -128,7 +128,7 @@ class TrainApp:
        self._app_name = "custom-app"
        self.task_id = sly_env.task_id(raise_not_found=False)
        if self.task_id is None:
-           self.task_id =
+           self.task_id = -1
            logger.info("TrainApp is running in debug mode")

        self.framework_name = framework_name

@@ -580,7 +580,7 @@ class TrainApp:

        # Step 6. Upload artifacts
        self._set_text_status("uploading")
-       remote_dir,
+       remote_dir, session_link_file_info = self._upload_artifacts()

        # Step 7. [Optional] Run Model Benchmark
        mb_eval_lnk_file_info, mb_eval_report = None, None

@@ -650,12 +650,35 @@ class TrainApp:

        # Step 10. Set output widgets
        self._set_text_status("reset")
-       self._set_training_output(
+       self._set_training_output(
+           experiment_info, remote_dir, session_link_file_info, mb_eval_report
+       )
        self._set_ws_progress_status("completed")

        # Step 11. Workflow output
        if is_production():
-           self.
+           best_checkpoint_file_info = self._get_best_checkpoint_info(experiment_info, remote_dir)
+           self._workflow_output(
+               remote_dir, best_checkpoint_file_info, mb_eval_lnk_file_info, mb_eval_report_id
+           )
+
+   def _get_best_checkpoint_info(self, experiment_info: dict, remote_dir: str) -> FileInfo:
+       """
+       Returns the best checkpoint info.
+
+       :param experiment_info: Experiment info.
+       :type experiment_info: dict
+       :param remote_dir: Remote directory.
+       :type remote_dir: str
+       :return: Best checkpoint info.
+       :rtype: FileInfo
+       """
+       best_checkpoint_name = experiment_info.get("best_checkpoint")
+       remote_best_checkpoint_path = join(remote_dir, "checkpoints", best_checkpoint_name)
+       best_checkpoint_file_info = self._api.file.get_info_by_path(
+           self.team_id, remote_best_checkpoint_path
+       )
+       return best_checkpoint_file_info

    def register_inference_class(
        self, inference_class: Inference, inference_settings: Union[str, dict] = None
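
The best checkpoint for the workflow output is now resolved by joining the experiment's `best_checkpoint` name with the uploaded `checkpoints` directory and looking the file up in Team Files. A hedged sketch of the same lookup performed outside `TrainApp`, with hypothetical remote paths:

```python
from os.path import join

import supervisely as sly

api = sly.Api.from_env()
team_id = sly.env.team_id()

# Hypothetical values mirroring what TrainApp passes in.
remote_dir = "/experiments/12345_my-project/67890_my-framework/"
experiment_info = {"best_checkpoint": "best.pth"}

best_path = join(remote_dir, "checkpoints", experiment_info["best_checkpoint"])
file_info = api.file.get_info_by_path(team_id, best_path)
print(file_info.path if file_info else "checkpoint not found in Team Files")
```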

@@ -686,6 +709,8 @@ class TrainApp:
        """
        Returns the current state of the application.

+       :param experiment_info: Experiment info.
+       :type experiment_info: dict
        :return: Application state.
        :rtype: dict
        """

@@ -748,6 +773,27 @@ class TrainApp:
        """
        self.gui.load_from_app_state(app_state)

+   def add_output_files(self, paths: List[str]) -> None:
+       """
+       Copies files or directories to the output directory, which will be uploaded to the team files upon training completion.
+       If path is a file, it will be uploaded to the root artifacts directory.
+       If path is a directory, it will be uploded to the root artifacts directory with the same directory name and structure.
+
+       :param paths: List of paths to files or directories to be copied to the output directory.
+       :type paths: List[str]
+       :return: None
+       :rtype: None
+       """
+
+       for path in paths:
+           if sly_fs.file_exists(path):
+               shutil.copyfile(path, join(self.output_dir, sly_fs.get_file_name_with_ext(path)))
+           elif sly_fs.dir_exists(path):
+               shutil.copytree(path, join(self.output_dir, basename(path)))
+           else:
+               logger.warning(f"Provided path: '{path}' does not exist. Skipping...")
+               continue
+
    # Loaders
    def _load_models(self, models: Union[str, List[Dict[str, Any]]]) -> List[Dict[str, Any]]:
        """
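
The new public `add_output_files` hook lets a custom training integration ship extra artifacts alongside the standard ones; files land in the artifacts root and directories keep their name and structure. A usage sketch, assuming `train` is an already configured `TrainApp` instance and the listed paths are hypothetical:

```python
# `train` is assumed to be an existing, configured TrainApp instance;
# the paths below are hypothetical artifacts produced by a custom training loop.
extra_artifacts = [
    "output/confusion_matrix.png",  # a single file is copied into the artifacts root
    "output/eval_plots",            # a directory is copied with its structure preserved
]
train.add_output_files(extra_artifacts)
# Paths that do not exist are skipped with a warning instead of failing the run.
```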

@@ -1549,7 +1595,7 @@ class TrainApp:
        logger.debug(f"Uploading '{local_path}' to Supervisely")
        total_size = sly_fs.get_file_size(local_path)
        with self.progress_bar_main(
-           message=message, total=total_size, unit="
+           message=message, total=total_size, unit="B", unit_scale=True, unit_divisor=1024
        ) as upload_artifacts_pbar:
            self.progress_bar_main.show()
            file_info = self._api.file.upload(
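
Several upload progress bars switch to `unit="B"` together with `unit_scale=True` and `unit_divisor=1024`, the usual tqdm idiom for rendering byte counts as KiB/MiB/GiB; `progress_bar_main` is a Supervisely progress widget called with the same keyword arguments in the hunks above. A minimal tqdm-only illustration of the effect, with a hypothetical file size:

```python
from tqdm import tqdm

total_size = 157_286_400  # hypothetical artifact size in bytes (150 MiB)

# unit="B" + unit_scale=True + unit_divisor=1024 shows scaled byte counts and a B/s rate.
with tqdm(total=total_size, unit="B", unit_scale=True, unit_divisor=1024) as pbar:
    for _ in range(150):
        pbar.update(1024 * 1024)  # pretend one 1 MiB chunk was uploaded
```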

@@ -1774,8 +1820,9 @@ class TrainApp:
        with self.progress_bar_main(
            message="Uploading demo files to Team Files",
            total=total_size,
-           unit="
+           unit="B",
            unit_scale=True,
+           unit_divisor=1024,
        ) as upload_artifacts_pbar:
            self.progress_bar_main.show()
            remote_dir = self._api.file.upload_directory_fast(

@@ -1863,7 +1910,7 @@ class TrainApp:
            f"Uploading artifacts directory: '{self.output_dir}' to Supervisely Team Files directory '{remote_artifacts_dir}'"
        )
        # Clean debug directory if exists
-       if task_id ==
+       if task_id == -1:
            if self._api.file.dir_exists(self.team_id, f"{remote_artifacts_dir}/", True):
                with self.progress_bar_main(
                    message=f"[Debug] Cleaning train artifacts: '{remote_artifacts_dir}/'",

@@ -1888,8 +1935,9 @@ class TrainApp:
        with self.progress_bar_main(
            message="Uploading train artifacts to Team Files",
            total=total_size,
-           unit="
+           unit="B",
            unit_scale=True,
+           unit_divisor=1024,
        ) as upload_artifacts_pbar:
            self.progress_bar_main.show()
            remote_dir = self._api.file.upload_directory_fast(

@@ -1907,6 +1955,7 @@ class TrainApp:
            "state": {"slyFolder": f"{join(remote_dir, 'logs')}"}
        }
        self.gui.training_logs.tensorboard_offline_button.enable()
+
        return remote_dir, file_info

    def _set_training_output(

@@ -2095,6 +2144,7 @@ class TrainApp:
            "model_meta": model_meta.to_json(),
            "task_type": task_type,
        }
+       self._benchmark_params["without_workflow"] = True

        logger.info(f"Deploy parameters: {self._benchmark_params}")


@@ -2162,16 +2212,19 @@ class TrainApp:
        raise ValueError(f"Task type: '{task_type}' is not supported for Model Benchmark")

        if self._has_splits_selector:
+           app_session_id = self.task_id
+           if app_session_id == -1:
+               app_session_id = None
            if self.gui.train_val_splits_selector.get_split_method() == "Based on datasets":
                train_info = {
-                   "app_session_id":
+                   "app_session_id": app_session_id,
                    "train_dataset_ids": train_dataset_ids,
                    "train_images_ids": None,
                    "images_count": len(self._train_split),
                }
            else:
                train_info = {
-                   "app_session_id":
+                   "app_session_id": app_session_id,
                    "train_dataset_ids": None,
                    "train_images_ids": train_images_ids,
                    "images_count": len(self._train_split),

@@ -2223,6 +2276,25 @@ class TrainApp:

        except Exception as e:
            logger.error(f"Model benchmark failed. {repr(e)}", exc_info=True)
+           pred_error_message = (
+               "Not found any predictions. Please make sure that your model produces predictions."
+           )
+           if isinstance(e, ValueError) and str(e) == pred_error_message:
+               self.gui.training_artifacts.model_benchmark_fail_text.set(
+                   "The Model Evaluation report cannot be generated: The model is not making predictions. "
+                   "This indicates that your model may not have trained successfully or is underfitted. "
+                   "You can try increasing the number of epochs or adjusting the hyperparameters more carefully.",
+                   "warning",
+               )
+
+           lnk_file_info, report, report_id, eval_metrics, primary_metric_name = (
+               None,
+               None,
+               None,
+               {},
+               None,
+           )
+
        self._set_text_status("finalizing")
        self.progress_bar_main.hide()
        self.progress_bar_secondary.hide()

@@ -2288,6 +2360,14 @@ class TrainApp:
    ):
        """
        Adds the output data to the workflow.
+
+       :param team_files_dir: Team files directory.
+       :type team_files_dir: str
+       :param file_info: FileInfo of the best checkpoint.
+       :type file_info: FileInfo
+       :param model_benchmark_report: FileInfo of the model benchmark report link (.lnk).
+       :type model_benchmark_report: Optional[FileInfo]
+       :param model_benchmark_report_id: Model benchmark report ID.
        """
        try:
            module_id = (

@@ -2310,7 +2390,7 @@ class TrainApp:

        if file_info:
            relation_settings = WorkflowSettings(
-               title="
+               title="Checkpoints",
                icon="folder",
                icon_color="#FFA500",
                icon_bg_color="#FFE8BE",

@@ -2321,7 +2401,10 @@ class TrainApp:
                relation_settings=relation_settings, node_settings=node_settings
            )
            logger.debug(f"Workflow Output: meta \n {meta}")
-           self._api.app.workflow.add_output_file(file_info, model_weight=True, meta=meta)
+           # self._api.app.workflow.add_output_file(file_info, model_weight=True, meta=meta)
+
+           remote_checkpoint_dir = dirname(file_info.path)
+           self._api.app.workflow.add_output_folder(remote_checkpoint_dir, meta=meta)
        else:
            logger.debug(
                f"File with checkpoints not found in Team Files. Cannot set workflow output."

@@ -2370,13 +2453,14 @@ class TrainApp:
            logger.debug("Tensorboard server is already running")
            return
        self._register_routes()
+
        args = [
            "tensorboard",
            "--logdir",
            self.log_dir,
            "--host=localhost",
            f"--port={self._tensorboard_port}",
-           "--load_fast=
+           "--load_fast=auto",
            "--reload_multifile=true",
        ]
        self._tensorboard_process = subprocess.Popen(args)

@@ -2522,6 +2606,9 @@ class TrainApp:
            self._set_ws_progress_status("finalizing")
            self._finalize(experiment_info)
            self.gui.training_process.start_button.loading = False
+
+           # Shutdown the app after training is finished
+           self.app.shutdown()
        except Exception as e:
            message = f"Error occurred during finalizing and uploading training artifacts. {check_logs_text}"
            self._show_error(message, e)

@@ -2706,6 +2793,7 @@ class TrainApp:
            total=size,
            unit="B",
            unit_scale=True,
+           unit_divisor=1024,
        ) as export_upload_main_pbar:
            logger.debug(f"Uploading {len(export_weights)} export weights of size {size} bytes")
            logger.debug(f"Destination paths: {file_dest_paths}")

{supervisely-6.73.369.dist-info → supervisely-6.73.371.dist-info}/RECORD

@@ -59,7 +59,7 @@ supervisely/api/nn/deploy_api.py,sha256=V7zWO8yoroFDifIqLIFYmZA72tLQriH5kYAhN0-5
 supervisely/api/nn/neural_network_api.py,sha256=vZyYBaKQzLJX9G3SAt09LmsxNLC8h88oYJ9b_PACzp0,10466
 supervisely/api/pointcloud/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/api/pointcloud/pointcloud_annotation_api.py,sha256=xIXqCu0rKYsGt5ezh2EFT2utwsVrr2Xo-MOWUCnbvXc,11259
-supervisely/api/pointcloud/pointcloud_api.py,sha256=
+supervisely/api/pointcloud/pointcloud_api.py,sha256=Gii6INYqo5f3EUCkI14VMi2XuaxbRHEaqSb_HHmJJTA,53497
 supervisely/api/pointcloud/pointcloud_episode_annotation_api.py,sha256=zmDWDkRzUSfHKX65qDVrc44kNyYjfvItSwYmsERJ_8g,7012
 supervisely/api/pointcloud/pointcloud_episode_api.py,sha256=xg1zRKONV9ly0-B72V1dR6OMPFIw35bujazuEdrPGTQ,7922
 supervisely/api/pointcloud/pointcloud_episode_object_api.py,sha256=k2_wV0EVPo9vxSTVe1qOvqVOMSVE6zGDSkfR6TRNsKs,691

@@ -255,7 +255,7 @@ supervisely/app/widgets/empty/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5N
 supervisely/app/widgets/empty/empty.py,sha256=fCr8I7CQ2XLo59bl2txjDrblOGiu0TzUcM-Pq6s7gKY,1285
 supervisely/app/widgets/empty/template.html,sha256=aDBKkin5aLuqByzNN517-rTYCGIg5SPKgnysYMPYjv8,40
 supervisely/app/widgets/experiment_selector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/app/widgets/experiment_selector/experiment_selector.py,sha256=
+supervisely/app/widgets/experiment_selector/experiment_selector.py,sha256=MButdiR6j6bpvJRI9iYtO5UjQu_Dc4ABzcrPsy2YcRg,19933
 supervisely/app/widgets/experiment_selector/style.css,sha256=-zPPXHnJvatYj_xVVAb7T8uoSsUTyhm5xCKWkkFQ78E,548
 supervisely/app/widgets/experiment_selector/template.html,sha256=k7f_Xl6nDUXXwu6IY_RblYni5TbZRRxCBduY5O_SyFs,2908
 supervisely/app/widgets/fast_table/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -887,8 +887,8 @@ supervisely/nn/benchmark/visualization/widgets/sidebar/sidebar.py,sha256=tKPURRS
 supervisely/nn/benchmark/visualization/widgets/table/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/widgets/table/table.py,sha256=atmDnF1Af6qLQBUjLhK18RMDKAYlxnsuVHMSEa5a-e8,4319
 supervisely/nn/inference/__init__.py,sha256=QFukX2ip-U7263aEPCF_UCFwj6EujbMnsgrXp5Bbt8I,1623
-supervisely/nn/inference/cache.py,sha256=
-supervisely/nn/inference/inference.py,sha256=
+supervisely/nn/inference/cache.py,sha256=yqVPIWzhIDRHwrCIpdm-gPxUM2rH8BD98omF659RElw,34938
+supervisely/nn/inference/inference.py,sha256=gq0yvMiFZ2FfJvvEmw7PRHa0GikCwRX9S2CE_0fuGX4,177798
 supervisely/nn/inference/inference_request.py,sha256=y6yw0vbaRRcEBS27nq3y0sL6Gmq2qLA_Bm0GrnJGegE,14267
 supervisely/nn/inference/session.py,sha256=dIg2F-OBl68pUzcmtmcI0YQIp1WWNnrJTVMjwFN91Q4,35824
 supervisely/nn/inference/uploader.py,sha256=21a9coOimCHhEqAbV-llZWcp12847DEMoQp3N16bpK0,5425

@@ -906,7 +906,7 @@ supervisely/nn/inference/instance_segmentation/dashboard/main_ui.py,sha256=_M46q
 supervisely/nn/inference/instance_segmentation/dashboard/preview.py,sha256=uwUDlJzDLgKlGOn5CovCh03WAijlx04iyPs00qbsUpU,847
 supervisely/nn/inference/instance_segmentation/dashboard/preview_image.py,sha256=wXfPg7bAT2xXPlnl2zywkx4MJ66myl5KBGzrvoyS8zQ,279
 supervisely/nn/inference/interactive_segmentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/nn/inference/interactive_segmentation/functional.py,sha256=
+supervisely/nn/inference/interactive_segmentation/functional.py,sha256=3GaeeOemji_ym0-C2mwj6SVEhnPkq7v3KCc4npw3iZ8,4905
 supervisely/nn/inference/interactive_segmentation/interactive_segmentation.py,sha256=vX1H9eaR8oRfMecQxJfpu6hGxbtf8bEESGJp_fPmfWc,20402
 supervisely/nn/inference/object_detection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/inference/object_detection/object_detection.py,sha256=Ki4updsAid1KOI2ahcdVVZ9lzdyRcYKyuQyzSz_9sXQ,1746

@@ -994,10 +994,10 @@ supervisely/nn/tracking/__init__.py,sha256=Ld1ed7ZZQZPkhX-5Xr-UbHZx5zLCm2-tInHnP
 supervisely/nn/tracking/boxmot.py,sha256=H9cQjYGL9nX_TLrfKDChhljTIiE9lffcgbwWCf_4PJU,4277
 supervisely/nn/tracking/tracking.py,sha256=WNrNm02B1pspA3d_AmzSJ-54RZTqWV2NZiC7FHe88bo,857
 supervisely/nn/training/__init__.py,sha256=gY4PCykJ-42MWKsqb9kl-skemKa8yB6t_fb5kzqR66U,111
-supervisely/nn/training/train_app.py,sha256=
+supervisely/nn/training/train_app.py,sha256=D2Fuy1SzoHTqeMWrdLLVLqeZN5Eu6M_CzU85y78na6I,116077
 supervisely/nn/training/gui/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 supervisely/nn/training/gui/classes_selector.py,sha256=Bpp-RFDQqcZ0kLJmS6ZnExkdscWwRusvF4vbWjEsKlQ,3926
-supervisely/nn/training/gui/gui.py,sha256=
+supervisely/nn/training/gui/gui.py,sha256=Z68uMPNkOyb70rpxfVDfJuGSzcoOhrqqDog8PABF2JQ,43312
 supervisely/nn/training/gui/hyperparameters_selector.py,sha256=5dUCYAx4E0HBLguj2B_s2nWeGGCWzv6vJeT0XvDJO3M,7746
 supervisely/nn/training/gui/input_selector.py,sha256=rmirJzpdxuYONI6y5_cvMdGWBJ--T20YTsISghATHu4,2510
 supervisely/nn/training/gui/model_selector.py,sha256=I6KRKyylpwUEC3CApEnzDKkWe5xqju0Az3D0Eg32Jdc,5352

@@ -1097,9 +1097,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
+supervisely-6.73.371.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.371.dist-info/METADATA,sha256=kxGz3tIoXNmaQE4KCZ4zyX0iv1BAzItmJ2DjN4jZKkw,35154
+supervisely-6.73.371.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+supervisely-6.73.371.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.371.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.371.dist-info/RECORD,,

Files without changes: LICENSE, WHEEL, entry_points.txt, top_level.txt