supervisely 6.73.452__py3-none-any.whl → 6.73.513__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/__init__.py +25 -1
- supervisely/annotation/annotation.py +8 -2
- supervisely/annotation/json_geometries_map.py +13 -12
- supervisely/api/annotation_api.py +6 -3
- supervisely/api/api.py +2 -0
- supervisely/api/app_api.py +10 -1
- supervisely/api/dataset_api.py +74 -12
- supervisely/api/entities_collection_api.py +10 -0
- supervisely/api/entity_annotation/figure_api.py +28 -0
- supervisely/api/entity_annotation/object_api.py +3 -3
- supervisely/api/entity_annotation/tag_api.py +63 -12
- supervisely/api/guides_api.py +210 -0
- supervisely/api/image_api.py +4 -0
- supervisely/api/labeling_job_api.py +83 -1
- supervisely/api/labeling_queue_api.py +33 -7
- supervisely/api/module_api.py +5 -0
- supervisely/api/project_api.py +71 -26
- supervisely/api/storage_api.py +3 -1
- supervisely/api/task_api.py +13 -2
- supervisely/api/team_api.py +4 -3
- supervisely/api/video/video_annotation_api.py +119 -3
- supervisely/api/video/video_api.py +65 -14
- supervisely/app/__init__.py +1 -1
- supervisely/app/content.py +23 -7
- supervisely/app/development/development.py +18 -2
- supervisely/app/fastapi/__init__.py +1 -0
- supervisely/app/fastapi/custom_static_files.py +1 -1
- supervisely/app/fastapi/multi_user.py +105 -0
- supervisely/app/fastapi/subapp.py +88 -42
- supervisely/app/fastapi/websocket.py +77 -9
- supervisely/app/singleton.py +21 -0
- supervisely/app/v1/app_service.py +18 -2
- supervisely/app/v1/constants.py +7 -1
- supervisely/app/widgets/__init__.py +6 -0
- supervisely/app/widgets/activity_feed/__init__.py +0 -0
- supervisely/app/widgets/activity_feed/activity_feed.py +239 -0
- supervisely/app/widgets/activity_feed/style.css +78 -0
- supervisely/app/widgets/activity_feed/template.html +22 -0
- supervisely/app/widgets/card/card.py +20 -0
- supervisely/app/widgets/classes_list_selector/classes_list_selector.py +121 -9
- supervisely/app/widgets/classes_list_selector/template.html +60 -93
- supervisely/app/widgets/classes_mapping/classes_mapping.py +13 -12
- supervisely/app/widgets/classes_table/classes_table.py +1 -0
- supervisely/app/widgets/deploy_model/deploy_model.py +56 -35
- supervisely/app/widgets/ecosystem_model_selector/ecosystem_model_selector.py +1 -1
- supervisely/app/widgets/experiment_selector/experiment_selector.py +8 -0
- supervisely/app/widgets/fast_table/fast_table.py +184 -60
- supervisely/app/widgets/fast_table/template.html +1 -1
- supervisely/app/widgets/heatmap/__init__.py +0 -0
- supervisely/app/widgets/heatmap/heatmap.py +564 -0
- supervisely/app/widgets/heatmap/script.js +533 -0
- supervisely/app/widgets/heatmap/style.css +233 -0
- supervisely/app/widgets/heatmap/template.html +21 -0
- supervisely/app/widgets/modal/__init__.py +0 -0
- supervisely/app/widgets/modal/modal.py +198 -0
- supervisely/app/widgets/modal/template.html +10 -0
- supervisely/app/widgets/object_class_view/object_class_view.py +3 -0
- supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
- supervisely/app/widgets/radio_tabs/template.html +1 -0
- supervisely/app/widgets/select/select.py +6 -3
- supervisely/app/widgets/select_class/__init__.py +0 -0
- supervisely/app/widgets/select_class/select_class.py +363 -0
- supervisely/app/widgets/select_class/template.html +50 -0
- supervisely/app/widgets/select_cuda/select_cuda.py +22 -0
- supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +65 -7
- supervisely/app/widgets/select_tag/__init__.py +0 -0
- supervisely/app/widgets/select_tag/select_tag.py +352 -0
- supervisely/app/widgets/select_tag/template.html +64 -0
- supervisely/app/widgets/select_team/select_team.py +37 -4
- supervisely/app/widgets/select_team/template.html +4 -5
- supervisely/app/widgets/select_user/__init__.py +0 -0
- supervisely/app/widgets/select_user/select_user.py +270 -0
- supervisely/app/widgets/select_user/template.html +13 -0
- supervisely/app/widgets/select_workspace/select_workspace.py +59 -10
- supervisely/app/widgets/select_workspace/template.html +9 -12
- supervisely/app/widgets/table/table.py +68 -13
- supervisely/app/widgets/tree_select/tree_select.py +2 -0
- supervisely/aug/aug.py +6 -2
- supervisely/convert/base_converter.py +1 -0
- supervisely/convert/converter.py +2 -2
- supervisely/convert/image/image_converter.py +3 -1
- supervisely/convert/image/image_helper.py +48 -4
- supervisely/convert/image/label_studio/label_studio_converter.py +2 -0
- supervisely/convert/image/medical2d/medical2d_helper.py +2 -24
- supervisely/convert/image/multispectral/multispectral_converter.py +6 -0
- supervisely/convert/image/pascal_voc/pascal_voc_converter.py +8 -5
- supervisely/convert/image/pascal_voc/pascal_voc_helper.py +7 -0
- supervisely/convert/pointcloud/kitti_3d/kitti_3d_converter.py +33 -3
- supervisely/convert/pointcloud/kitti_3d/kitti_3d_helper.py +12 -5
- supervisely/convert/pointcloud/las/las_converter.py +13 -1
- supervisely/convert/pointcloud/las/las_helper.py +110 -11
- supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +27 -16
- supervisely/convert/pointcloud/pointcloud_converter.py +91 -3
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +58 -22
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +21 -47
- supervisely/convert/video/__init__.py +1 -0
- supervisely/convert/video/multi_view/__init__.py +0 -0
- supervisely/convert/video/multi_view/multi_view.py +543 -0
- supervisely/convert/video/sly/sly_video_converter.py +359 -3
- supervisely/convert/video/video_converter.py +22 -2
- supervisely/convert/volume/dicom/dicom_converter.py +13 -5
- supervisely/convert/volume/dicom/dicom_helper.py +30 -18
- supervisely/geometry/constants.py +1 -0
- supervisely/geometry/geometry.py +4 -0
- supervisely/geometry/helpers.py +5 -1
- supervisely/geometry/oriented_bbox.py +676 -0
- supervisely/geometry/rectangle.py +2 -1
- supervisely/io/env.py +76 -1
- supervisely/io/fs.py +21 -0
- supervisely/nn/benchmark/base_evaluator.py +104 -11
- supervisely/nn/benchmark/instance_segmentation/evaluator.py +1 -8
- supervisely/nn/benchmark/object_detection/evaluator.py +20 -4
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +10 -5
- supervisely/nn/benchmark/semantic_segmentation/evaluator.py +34 -16
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +1 -1
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +1 -1
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +1 -1
- supervisely/nn/benchmark/visualization/evaluation_result.py +66 -4
- supervisely/nn/inference/cache.py +43 -18
- supervisely/nn/inference/gui/serving_gui_template.py +5 -2
- supervisely/nn/inference/inference.py +795 -199
- supervisely/nn/inference/inference_request.py +42 -9
- supervisely/nn/inference/predict_app/gui/classes_selector.py +83 -12
- supervisely/nn/inference/predict_app/gui/gui.py +676 -488
- supervisely/nn/inference/predict_app/gui/input_selector.py +205 -26
- supervisely/nn/inference/predict_app/gui/model_selector.py +2 -4
- supervisely/nn/inference/predict_app/gui/output_selector.py +46 -6
- supervisely/nn/inference/predict_app/gui/settings_selector.py +756 -59
- supervisely/nn/inference/predict_app/gui/tags_selector.py +1 -1
- supervisely/nn/inference/predict_app/gui/utils.py +236 -119
- supervisely/nn/inference/predict_app/predict_app.py +2 -2
- supervisely/nn/inference/session.py +43 -35
- supervisely/nn/inference/tracking/bbox_tracking.py +113 -34
- supervisely/nn/inference/tracking/tracker_interface.py +7 -2
- supervisely/nn/inference/uploader.py +139 -12
- supervisely/nn/live_training/__init__.py +7 -0
- supervisely/nn/live_training/api_server.py +111 -0
- supervisely/nn/live_training/artifacts_utils.py +243 -0
- supervisely/nn/live_training/checkpoint_utils.py +229 -0
- supervisely/nn/live_training/dynamic_sampler.py +44 -0
- supervisely/nn/live_training/helpers.py +14 -0
- supervisely/nn/live_training/incremental_dataset.py +146 -0
- supervisely/nn/live_training/live_training.py +497 -0
- supervisely/nn/live_training/loss_plateau_detector.py +111 -0
- supervisely/nn/live_training/request_queue.py +52 -0
- supervisely/nn/model/model_api.py +9 -0
- supervisely/nn/prediction_dto.py +12 -1
- supervisely/nn/tracker/base_tracker.py +11 -1
- supervisely/nn/tracker/botsort/botsort_config.yaml +0 -1
- supervisely/nn/tracker/botsort/tracker/mc_bot_sort.py +7 -4
- supervisely/nn/tracker/botsort_tracker.py +94 -65
- supervisely/nn/tracker/visualize.py +87 -90
- supervisely/nn/training/gui/classes_selector.py +16 -1
- supervisely/nn/training/train_app.py +28 -29
- supervisely/project/data_version.py +115 -51
- supervisely/project/download.py +1 -1
- supervisely/project/pointcloud_episode_project.py +37 -8
- supervisely/project/pointcloud_project.py +30 -2
- supervisely/project/project.py +14 -2
- supervisely/project/project_meta.py +27 -1
- supervisely/project/project_settings.py +32 -18
- supervisely/project/versioning/__init__.py +1 -0
- supervisely/project/versioning/common.py +20 -0
- supervisely/project/versioning/schema_fields.py +35 -0
- supervisely/project/versioning/video_schema.py +221 -0
- supervisely/project/versioning/volume_schema.py +87 -0
- supervisely/project/video_project.py +717 -15
- supervisely/project/volume_project.py +623 -5
- supervisely/template/experiment/experiment.html.jinja +4 -4
- supervisely/template/experiment/experiment_generator.py +14 -21
- supervisely/template/live_training/__init__.py +0 -0
- supervisely/template/live_training/header.html.jinja +96 -0
- supervisely/template/live_training/live_training.html.jinja +51 -0
- supervisely/template/live_training/live_training_generator.py +464 -0
- supervisely/template/live_training/sly-style.css +402 -0
- supervisely/template/live_training/template.html.jinja +18 -0
- supervisely/versions.json +28 -26
- supervisely/video/sampling.py +39 -20
- supervisely/video/video.py +40 -11
- supervisely/video_annotation/video_object.py +29 -4
- supervisely/volume/stl_converter.py +2 -0
- supervisely/worker_api/agent_rpc.py +24 -1
- supervisely/worker_api/rpc_servicer.py +31 -7
- {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/METADATA +56 -39
- {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/RECORD +189 -142
- {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/WHEEL +1 -1
- {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info/licenses}/LICENSE +0 -0
- {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/top_level.txt +0 -0
supervisely/api/storage_api.py
CHANGED
@@ -228,7 +228,9 @@ class StorageApi(FileApi):
         path_infos = self.list(team_id, parent_dir, recursive=False, return_type="dict")
         for info in path_infos:
             if info["type"] == path_type:
-                if info["path"]
+                if path_type == "file" and info["path"] == remote_path:
+                    return True
+                elif path_type == "folder" and info["path"].rstrip("/") == remote_path.rstrip("/"):
                     return True
         return False
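The change splits the existence check by entity type: file paths must match exactly, while folder paths are compared with trailing slashes stripped. A minimal standalone sketch of that comparison rule (a hypothetical helper, not part of the SDK):

def _paths_match(path_type: str, info_path: str, remote_path: str) -> bool:
    # Files must match verbatim; folders match regardless of a trailing "/".
    if path_type == "file":
        return info_path == remote_path
    return info_path.rstrip("/") == remote_path.rstrip("/")

assert _paths_match("folder", "/reports/2024/", "/reports/2024")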
supervisely/api/task_api.py
CHANGED
@@ -390,6 +390,7 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
         redirect_requests: Optional[Dict[str, int]] = {},
         limit_by_workspace: bool = False,
         kubernetes_settings: Optional[Union[KubernetesSettings, Dict[str, Any]]] = None,
+        multi_user_session: bool = False,
     ) -> Dict[str, Any]:
         """Starts the application task on the agent.

@@ -428,6 +429,11 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
         :type limit_by_workspace: bool, optional
         :param kubernetes_settings: Kubernetes settings for the application.
         :type kubernetes_settings: Union[KubernetesSettings, Dict[str, Any]], optional
+        :param multi_user_session: If True, the application session will be created as multi-user.
+            In this case, multiple users will be able to connect to the same application session.
+            All users will have separate application states.
+            Available only for applications that support multi-user sessions.
+        :type multi_user_session: bool, default is False
         :return: Task information in JSON format.
         :rtype: Dict[str, Any]

@@ -497,6 +503,11 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
             data[ApiField.APP_ID] = app_id
         if module_id is not None:
             data[ApiField.MODULE_ID] = module_id
+        if multi_user_session:
+            # * Enables single multi-user session mode for all users in the users_ids list.
+            # * Otherwise, if users_ids contains multiple IDs, separate single-user sessions will be created for each.
+            # * If users_ids is empty, a session is created only for the current user.
+            data[ApiField.SINGLE_SESSION_MODE] = multi_user_session
         resp = self._api.post(method="tasks.run.app", data=data)
         task = resp.json()[0]
         if "id" not in task:

@@ -805,8 +816,8 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
     ):
         """
         Update given task metadata
-        :param id: int
-        :param data: dict
+        :param id: int - task id
+        :param data: dict - meta data to update
         """
         if type(data) == dict:
             data.update({"id": id})
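A minimal sketch of launching an app session with the new flag, following the docstring above. The multi_user_session argument comes from this diff; the other api.task.start arguments follow the existing signature, and all IDs are placeholders:

import supervisely as sly

api = sly.Api.from_env()
# Start one shared session that several users can open; each user keeps a separate app state.
task = api.task.start(
    agent_id=None,
    module_id=123,            # hypothetical module ID
    workspace_id=456,         # hypothetical workspace ID
    task_name="shared-labeling-tool",
    multi_user_session=True,  # maps to ApiField.SINGLE_SESSION_MODE in the request payload
)
print(task["id"])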
supervisely/api/team_api.py
CHANGED
@@ -132,7 +132,7 @@ class ActivityAction:
 class UsageInfo(NamedTuple):
     """ """

-    plan: str
+    plan: Optional[str]


 class TeamInfo(NamedTuple):
@@ -144,7 +144,7 @@ class TeamInfo(NamedTuple):
     role: str
     created_at: str
     updated_at: str
-    usage: UsageInfo
+    usage: Optional[UsageInfo]


 class TeamApi(ModuleNoParent, UpdateableModule):
@@ -565,5 +565,6 @@ class TeamApi(ModuleNoParent, UpdateableModule):
         res = super()._convert_json_info(info, skip_missing=skip_missing)
         res_dict = res._asdict()
         if isinstance(res_dict.get("usage"), dict):
-
+            usage_dict = {f: res_dict["usage"].get(f) for f in UsageInfo._fields}
+            res_dict["usage"] = UsageInfo(**usage_dict)
         return TeamInfo(**res_dict)
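Since usage (and its plan field) can now be None, callers should read it defensively. A minimal sketch, assuming the usual api.team.get_info_by_id accessor and a placeholder team ID:

import supervisely as sly

api = sly.Api.from_env()
team = api.team.get_info_by_id(123)  # hypothetical team ID
plan = team.usage.plan if team.usage is not None else "unknown"
print(f"Team {team.name} is on plan: {plan}")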
supervisely/api/video/video_annotation_api.py
CHANGED

@@ -2,6 +2,7 @@
 from __future__ import annotations

 import asyncio
+from collections import defaultdict
 from typing import Callable, Dict, List, Optional, Union

 from tqdm import tqdm
@@ -13,6 +14,7 @@ from supervisely.io.json import load_json_file
 from supervisely.project.project_meta import ProjectMeta
 from supervisely.video_annotation.key_id_map import KeyIdMap
 from supervisely.video_annotation.video_annotation import VideoAnnotation
+from supervisely.video_annotation.video_tag_collection import VideoTagCollection


 class VideoAnnotationAPI(EntityAnnotationAPI):
@@ -173,7 +175,6 @@ class VideoAnnotationAPI(EntityAnnotationAPI):
             api.video.annotation.upload_paths(video_ids, ann_paths, meta)
         """
         # video_ids from the same dataset
-
         for video_id, ann_path in zip(video_ids, ann_paths):
             ann_json = load_json_file(ann_path)
             ann = VideoAnnotation.from_json(ann_json, project_meta)
@@ -183,6 +184,119 @@ class VideoAnnotationAPI(EntityAnnotationAPI):
             if progress_cb is not None:
                 progress_cb(1)

+    def upload_paths_multiview(
+        self,
+        video_ids: List[int],
+        ann_paths: List[str],
+        project_meta: ProjectMeta,
+        progress_cb: Optional[Union[tqdm, Callable]] = None,
+    ) -> None:
+        """
+        Upload VideoAnnotations for multi-view video project.
+        All provided video ids must belong to the same project and dataset.
+
+        Objects with the same key are created only once and shared between videos.
+        In this mode annotation objects are created without binding to a specific entityId.
+
+        :param video_ids: Video IDs in Supervisely.
+        :type video_ids: List[int]
+        :param ann_paths: Paths to annotations on local machine.
+        :type ann_paths: List[str]
+        :param project_meta: Input :class:`ProjectMeta<supervisely.project.project_meta.ProjectMeta>` for VideoAnnotations.
+        :type project_meta: ProjectMeta
+        :param progress_cb: Function for tracking upload progress.
+        :type progress_cb: tqdm or callable, optional
+        :return: None
+        :rtype: :class:`NoneType`
+        """
+        if len(video_ids) != len(ann_paths):
+            raise RuntimeError(
+                f'Can not match "video_ids" and "ann_paths" lists, len(video_ids) != len(ann_paths): {len(video_ids)} != {len(ann_paths)}'
+            )
+        if len(video_ids) == 0:
+            return
+
+        anns = []
+        for ann_path in ann_paths:
+            ann_json = load_json_file(ann_path)
+            ann = VideoAnnotation.from_json(ann_json, project_meta)
+            anns.append(ann)
+
+        self.upload_anns_multiview(video_ids, anns, progress_cb)
+
+    def upload_anns_multiview(
+        self,
+        video_ids: List[int],
+        anns: List[VideoAnnotation],
+        progress_cb: Optional[Union[tqdm, Callable]] = None,
+    ) -> None:
+        """
+        Upload already constructed VideoAnnotation objects for multi-view video project.
+        All provided video ids must belong to the same project and dataset.
+
+        Objects with the same key are created only once and shared between videos.
+        In this mode annotation objects are created without binding to a specific entityId.
+
+        :param video_ids: Video IDs in Supervisely.
+        :type video_ids: List[int]
+        :param anns: List of VideoAnnotation objects corresponding to the video_ids.
+        :type anns: List[VideoAnnotation]
+        :param progress_cb: Function for tracking upload progress (by number of figures).
+        :type progress_cb: tqdm or callable, optional
+        :return: None
+        :rtype: :class:`NoneType`
+        """
+        if len(video_ids) != len(anns):
+            raise RuntimeError(
+                'Can not match "video_ids" and "anns" lists, len(video_ids) != len(anns)'
+            )
+        if len(video_ids) == 0:
+            return
+
+        try:
+            video_infos = self._api.video.get_info_by_id_batch(video_ids)
+        except RuntimeError as e:
+            raise RuntimeError("All videos must belong to the same project and dataset.") from e
+
+        project_id = video_infos[0].project_id
+        dataset_id = video_infos[0].dataset_id
+
+        tag_api = self._api.video.tag
+        object_api = self._api.video.object
+        figure_api = self._api.video.figure
+
+        key_id_map = KeyIdMap()
+        for video_id, ann in zip(video_ids, anns):
+            tag_api.append_to_entity(video_id, project_id, ann.tags, key_id_map=key_id_map)
+            new_objects = []
+            for obj in ann.objects:
+                if key_id_map.get_object_id(obj.key()) is None:
+                    new_objects.append(obj)
+            if len(new_objects) > 0:
+                object_api._append_bulk(
+                    tag_api=tag_api,
+                    entity_id=video_id,
+                    project_id=project_id,
+                    dataset_id=dataset_id,
+                    objects=new_objects,
+                    key_id_map=key_id_map,
+                    is_pointcloud=False,
+                    is_video_multi_view=True,
+                )
+            tags_to_obj = {}
+            for obj in ann.objects:
+                obj_id = key_id_map.get_object_id(obj.key())
+                tags_to_obj[obj_id] = obj.tags
+            if len(tags_to_obj) > 0:
+                tag_api.add_tags_collection_to_objects(project_id, tags_to_obj, is_video_multi_view=True, entity_id=video_id)
+
+            figure_api.append_bulk(video_id, ann.figures, key_id_map)
+            if progress_cb is not None and len(ann.figures) > 0:
+                if hasattr(progress_cb, "update") and callable(getattr(progress_cb, "update")):
+                    progress_cb.update(len(ann.figures))
+                else:
+                    progress_cb(len(ann.figures))
+
     def copy_batch(
         self,
         src_video_ids: List[int],
@@ -236,11 +350,13 @@ class VideoAnnotationAPI(EntityAnnotationAPI):
         dst_project_meta = ProjectMeta.from_json(
             self._api.project.get_meta(dst_dataset_info.project_id)
         )
-        for src_ids_batch, dst_ids_batch in batched(
+        for src_ids_batch, dst_ids_batch in zip(batched(src_video_ids), batched(dst_video_ids)):
             ann_jsons = self.download_bulk(src_dataset_id, src_ids_batch)
             for dst_id, ann_json in zip(dst_ids_batch, ann_jsons):
                 try:
-                    ann = VideoAnnotation.from_json(
+                    ann = VideoAnnotation.from_json(
+                        ann_json, dst_project_meta, key_id_map=KeyIdMap()
+                    )
                 except Exception as e:
                     raise RuntimeError("Failed to validate Annotation") from e
                 self.append(dst_id, ann)
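A minimal usage sketch of the new multi-view upload entry point, following the docstring above (IDs and file names are placeholders):

import supervisely as sly

api = sly.Api.from_env()
project_id = 42  # hypothetical project ID
project_meta = sly.ProjectMeta.from_json(api.project.get_meta(project_id))
video_ids = [111, 112, 113]  # views of the same scene, all in one dataset
ann_paths = ["left.json", "center.json", "right.json"]
# Objects sharing a key across the annotation files are created once and reused for every view.
api.video.annotation.upload_paths_multiview(video_ids, ann_paths, project_meta)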
supervisely/api/video/video_api.py
CHANGED

@@ -5,8 +5,10 @@ import asyncio
 import datetime
 import json
 import os
+import re
 import urllib.parse
 from functools import partial
+from itertools import zip_longest
 from typing import (
     AsyncGenerator,
     Callable,
@@ -23,7 +25,11 @@ from typing import (
 import aiofiles
 from numerize.numerize import numerize
 from requests import Response
-from requests_toolbelt import
+from requests_toolbelt import (
+    MultipartDecoder,
+    MultipartEncoder,
+    MultipartEncoderMonitor,
+)
 from tqdm import tqdm

 import supervisely.io.fs as sly_fs
@@ -46,6 +52,7 @@ from supervisely.io.fs import (
     get_file_hash,
     get_file_hash_async,
     get_file_hash_chunked,
+    get_file_hash_chunked_async,
     get_file_name_with_ext,
     get_file_size,
     list_files,
@@ -700,7 +707,7 @@ class VideoApi(RemoveableBulkModuleApi):
         return project_id, dataset_id

     def upload_hash(
-        self, dataset_id: int, name: str, hash: str, stream_index: Optional[int] = None
+        self, dataset_id: int, name: str, hash: str, stream_index: Optional[int] = None, metadata: Optional[Dict] = None
     ) -> VideoInfo:
         """
         Upload Video from given hash to Dataset.
@@ -713,6 +720,8 @@ class VideoApi(RemoveableBulkModuleApi):
         :type hash: str
         :param stream_index: Index of video stream.
         :type stream_index: int, optional
+        :param metadata: Video metadata.
+        :type metadata: dict, optional
         :return: Information about Video. See :class:`info_sequence<info_sequence>`
         :rtype: :class:`VideoInfo`
         :Usage example:
@@ -781,6 +790,8 @@ class VideoApi(RemoveableBulkModuleApi):
         meta = {}
         if stream_index is not None and type(stream_index) is int:
             meta = {"videoStreamIndex": stream_index}
+        if metadata is not None:
+            meta.update(metadata)
         return self.upload_hashes(dataset_id, [name], [hash], [meta])[0]

     def upload_hashes(
@@ -1106,10 +1117,10 @@ class VideoApi(RemoveableBulkModuleApi):
             validate_ext(os.path.splitext(name)[1])

         for batch in batched(list(zip(names, items, metas))):
-
+            videos = []
             for name, item, meta in batch:
                 item_tuple = func_item_to_kv(item)
-
+                videos.append(
                     {
                         "title": name,
                         item_tuple[0]: item_tuple[1],
@@ -1120,12 +1131,12 @@ class VideoApi(RemoveableBulkModuleApi):
                 "videos.bulk.add",
                 {
                     ApiField.DATASET_ID: dataset_id,
-                    ApiField.VIDEOS:
+                    ApiField.VIDEOS: videos,
                     ApiField.FORCE_METADATA_FOR_LINKS: force_metadata_for_links,
                 },
             )
             if progress_cb is not None:
-                progress_cb(len(
+                progress_cb(len(videos))

             results = [self._convert_json_info(item) for item in response.json()]
             name_to_res = {img_info.name: img_info for img_info in results}
@@ -1186,6 +1197,41 @@ class VideoApi(RemoveableBulkModuleApi):
         if progress_cb is not None:
             progress_cb(len(chunk))

+    def download_frames(
+        self, video_id: int, frames: List[int], paths: List[str], progress_cb=None
+    ) -> None:
+        endpoint = "videos.bulk.download-frame"
+        response: Response = self._api.get(
+            endpoint,
+            params={},
+            data={ApiField.VIDEO_ID: video_id, ApiField.FRAMES: frames},
+            stream=True,
+        )
+        response.raise_for_status()
+
+        files = {frame_n: None for frame_n in frames}
+        file_paths = {frame_n: path for frame_n, path in zip(frames, paths)}
+
+        try:
+            decoder = MultipartDecoder.from_response(response)
+            for part in decoder.parts:
+                content_utf8 = part.headers[b"Content-Disposition"].decode("utf-8")
+                # Find name="1245" preceded by a whitespace, semicolon or beginning of line.
+                # The regex has 2 capture group: one for the prefix and one for the actual name value.
+                frame_n = int(re.findall(r'(^|[\s;])name="(\d*)"', content_utf8)[0][1])
+                if files[frame_n] is None:
+                    file_path = file_paths[frame_n]
+                    files[frame_n] = open(file_path, "wb")
+                    if progress_cb is not None:
+                        progress_cb(1)
+                f = files[frame_n]
+                f.write(part.content)
+
+        finally:
+            for f in files.values():
+                if f is not None:
+                    f.close()
+
     def download_range_by_id(
         self,
         id: int,
@@ -1536,15 +1582,20 @@ class VideoApi(RemoveableBulkModuleApi):
         for hash_value, meta in zip(unique_hashes, unique_metas):
             hash_meta_dict[hash_value] = meta

-
-
-        metas2 = [meta["meta"] for meta in metas]
-
+        video_metadatas = [hash_meta_dict[hash_value] for hash_value in hashes]
+        video_metadatas2 = [meta["meta"] for meta in video_metadatas]
         names = self.get_free_names(dataset_id, names)

-
+        if metas is None:
+            metas = [None] * len(names)
+        if not isinstance(metas, list):
+            raise ValueError("metas must be a list")
+
+        for name, hash, video_metadata, metadata in zip_longest(
+            names, hashes, video_metadatas2, metas
+        ):
             try:
-                all_streams =
+                all_streams = video_metadata["streams"]
                 video_streams = get_video_streams(all_streams)
                 for stream_info in video_streams:
                     stream_index = stream_info["index"]
@@ -1559,7 +1610,7 @@ class VideoApi(RemoveableBulkModuleApi):
                 # info = self._api.video.get_info_by_name(dataset_id, item_name)
                 # if info is not None:
                 #     item_name = gen_video_stream_name(name, stream_index)
-                res = self.upload_hash(dataset_id, name, hash, stream_index)
+                res = self.upload_hash(dataset_id, name, hash, stream_index, metadata)
                 video_info_results.append(res)
             except Exception as e:
                 from supervisely.io.exception_handlers import (
@@ -2531,7 +2582,7 @@ class VideoApi(RemoveableBulkModuleApi):
                     progress_cb(len(chunk))
                 if check_hash:
                     if hash_to_check is not None:
-                        downloaded_file_hash = await
+                        downloaded_file_hash = await get_file_hash_chunked_async(path)
                         if hash_to_check != downloaded_file_hash:
                             raise RuntimeError(
                                 f"Downloaded hash of video with ID:{id} does not match the expected hash: {downloaded_file_hash} != {hash_to_check}"
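A short sketch of the new bulk frame download helper added above (video ID and frame indices are placeholders):

import supervisely as sly

api = sly.Api.from_env()
video_id = 777  # hypothetical video ID
frames = [0, 30, 60]
paths = [f"frame_{n}.jpg" for n in frames]
# Streams a multipart response and writes each returned frame to its matching local path.
api.video.download_frames(video_id, frames, paths)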
supervisely/app/__init__.py
CHANGED
@@ -1,7 +1,7 @@
 from fastapi import FastAPI
 from supervisely.app.content import StateJson, DataJson
 from supervisely.app.content import get_data_dir, get_synced_data_dir
-from supervisely.app.fastapi.subapp import call_on_autostart
+from supervisely.app.fastapi.subapp import call_on_autostart, session_user_api
 import supervisely.app.fastapi as fastapi
 import supervisely.app.widgets as widgets
 import supervisely.app.development as development
supervisely/app/content.py
CHANGED
@@ -11,12 +11,14 @@ import threading
 import time
 import traceback
 from concurrent.futures import ThreadPoolExecutor
+from typing import Optional, Union

 import jsonpatch
 from fastapi import Request

 from supervisely._utils import is_production
 from supervisely.api.api import Api
+import supervisely.app.fastapi.multi_user as multi_user
 from supervisely.app.fastapi import run_sync
 from supervisely.app.fastapi.websocket import WebsocketManager
 from supervisely.app.singleton import Singleton
@@ -109,16 +111,29 @@ class _PatchableJson(dict):
         patch.apply(self._last, in_place=True)
         self._last = copy.deepcopy(self._last)

-    async def synchronize_changes(self):
+    async def synchronize_changes(self, user_id: Optional[Union[int, str]] = None):
         patch = self._get_patch()
-
-
+        if user_id is not None:
+            async with multi_user.async_session_context(user_id):
+                await self._apply_patch(patch)
+                await self._ws.broadcast(
+                    self.get_changes(patch), user_id=user_id
+                )
+        else:
+            await self._apply_patch(patch)
+            await self._ws.broadcast(self.get_changes(patch), user_id=user_id)

     async def send_changes_async(self):
-
+        user_id = None
+        if sly_env.is_multiuser_mode_enabled():
+            user_id = sly_env.user_from_multiuser_app()
+        await self.synchronize_changes(user_id=user_id)

     def send_changes(self):
-
+        user_id = None
+        if sly_env.is_multiuser_mode_enabled():
+            user_id = sly_env.user_from_multiuser_app()
+        run_sync(self.synchronize_changes(user_id=user_id))

     def raise_for_key(self, key: str):
         if key in self:
@@ -139,7 +154,7 @@ class StateJson(_PatchableJson, metaclass=Singleton):
         await StateJson._replace_global(dict(self))

     @classmethod
-    async def from_request(cls, request: Request) -> StateJson:
+    async def from_request(cls, request: Request, local: bool = True) -> StateJson:
         if "application/json" not in request.headers.get("Content-Type", ""):
             return None
         content = await request.json()
@@ -149,7 +164,8 @@ class StateJson(_PatchableJson, metaclass=Singleton):
         # TODO: should we always replace STATE with {}?
         d = content.get(Field.STATE, {})
         await cls._replace_global(d)
-
+
+        return cls(d, __local__=local)

     @classmethod
     async def _replace_global(cls, d: dict):
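For app code the calling pattern does not change: send_changes() now resolves the target user itself when multi-user mode is enabled. A minimal sketch with the global state object from this module:

from supervisely.app.content import StateJson

state = StateJson()
state["counter"] = state.get("counter", 0) + 1
# In a multi-user session the patch is broadcast only to the websocket of the user
# resolved from the environment; otherwise it is broadcast to everyone, as before.
state.send_changes()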
supervisely/app/development/development.py
CHANGED

@@ -156,7 +156,10 @@ def supervisely_vpn_network(


 def create_debug_task(
-    team_id: int = None,
+    team_id: int = None,
+    port: int = 8000,
+    update_status: bool = True,
+    project_id: Optional[int] = None,
 ) -> Dict[str, Any]:
     """Gets or creates a debug task for the current user.

@@ -167,6 +170,8 @@ def create_debug_task(
     :type port: int
     :param update_status: If True, the task status will be updated to STARTED.
     :type update_status: bool
+    :param project_id: Project ID to filter existing debug tasks. Creates a new task if no match is found. Default is None.
+    :type project_id: Optional[int]
     :return: The task details.
     :rtype: Dict[str, Any]
     """
@@ -189,6 +194,10 @@ def create_debug_task(
         if (session.details["meta"].get("redirectRequests") == redirect_requests) and (
             session.details["status"] in [str(api.app.Status.QUEUED), str(api.app.Status.STARTED)]
         ):
+            if project_id is not None:
+                state = session.details["meta"].get("params", {}).get("state", {})
+                if state.get("slyProjectId") != project_id:
+                    continue  # project_id not set in state, skip this session
             task = session.details
             if "id" not in task:
                 task["id"] = task["taskId"]
@@ -196,6 +205,7 @@ def create_debug_task(
             break
     workspaces = api.workspace.get_list(team_id)
     if task is None:
+        params = {"state": {"slyProjectId": project_id}} if project_id is not None else None
         task = api.task.start(
             agent_id=None,
             module_id=module_id,
@@ -203,6 +213,7 @@ def create_debug_task(
             task_name=session_name,
             redirect_requests=redirect_requests,
             proxy_keep_url=False,  # to ignore /net/<token>/endpoint
+            params=params,
         )
         if type(task) is list:
             task = task[0]
@@ -222,6 +233,7 @@ def enable_advanced_debug(
     vpn_action: Literal["up", "down"] = "up",
     vpn_raise_on_error: bool = True,
     only_for_development: bool = True,
+    project_id: Optional[int] = None,
 ) -> Optional[int]:
     """Enables advanced debugging for the app.
     At first, it establishes a WireGuard VPN connection to the Supervisely network.
@@ -244,6 +256,8 @@ def enable_advanced_debug(
     :param only_for_development: If True, the debugging will be started only if the app is running in development mode.
         It's not recommended to set this parameter to False in production environments.
     :type only_for_development: bool
+    :param project_id: Project ID to filter existing debug tasks. Creates a new task if no match is found. Default is None.
+    :type project_id: Optional[int]
     :return: The task ID of the debug task or None if the debugging was not started.
     :rtype: Optional[int]

@@ -285,7 +299,9 @@ def enable_advanced_debug(
     )

     supervisely_vpn_network(action=vpn_action, raise_on_error=vpn_raise_on_error)
-    task = create_debug_task(
+    task = create_debug_task(
+        team_id=team_id, port=port, update_status=update_status, project_id=project_id
+    )
     task_id = task.get("id", None)

     logger.debug(
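A minimal sketch of reusing a debug session scoped to a project (the project ID is a placeholder; the explicit module path is used here, other arguments keep their defaults):

from supervisely.app.development.development import enable_advanced_debug

# Reuses an existing QUEUED/STARTED debug task only if its state carries the same
# slyProjectId; otherwise a new task is started with that project preselected.
task_id = enable_advanced_debug(project_id=555)
print(f"Debug task: {task_id}")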
supervisely/app/fastapi/custom_static_files.py
CHANGED

@@ -42,7 +42,7 @@ class CustomStaticFiles(StaticFiles):
     def _get_range_header(range_header: str, file_size: int) -> typing.Tuple[int, int]:
         def _invalid_range():
             return HTTPException(
-                status.HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE,
+                status.HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE,  # TODO: change to status.HTTP_416_RANGE_NOT_SATISFIABLE if update starlette to 0.48.0+
                 detail=f"Invalid request range (Range:{range_header!r})",
             )
