supervisely 6.73.452-py3-none-any.whl → 6.73.513-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (189)
  1. supervisely/__init__.py +25 -1
  2. supervisely/annotation/annotation.py +8 -2
  3. supervisely/annotation/json_geometries_map.py +13 -12
  4. supervisely/api/annotation_api.py +6 -3
  5. supervisely/api/api.py +2 -0
  6. supervisely/api/app_api.py +10 -1
  7. supervisely/api/dataset_api.py +74 -12
  8. supervisely/api/entities_collection_api.py +10 -0
  9. supervisely/api/entity_annotation/figure_api.py +28 -0
  10. supervisely/api/entity_annotation/object_api.py +3 -3
  11. supervisely/api/entity_annotation/tag_api.py +63 -12
  12. supervisely/api/guides_api.py +210 -0
  13. supervisely/api/image_api.py +4 -0
  14. supervisely/api/labeling_job_api.py +83 -1
  15. supervisely/api/labeling_queue_api.py +33 -7
  16. supervisely/api/module_api.py +5 -0
  17. supervisely/api/project_api.py +71 -26
  18. supervisely/api/storage_api.py +3 -1
  19. supervisely/api/task_api.py +13 -2
  20. supervisely/api/team_api.py +4 -3
  21. supervisely/api/video/video_annotation_api.py +119 -3
  22. supervisely/api/video/video_api.py +65 -14
  23. supervisely/app/__init__.py +1 -1
  24. supervisely/app/content.py +23 -7
  25. supervisely/app/development/development.py +18 -2
  26. supervisely/app/fastapi/__init__.py +1 -0
  27. supervisely/app/fastapi/custom_static_files.py +1 -1
  28. supervisely/app/fastapi/multi_user.py +105 -0
  29. supervisely/app/fastapi/subapp.py +88 -42
  30. supervisely/app/fastapi/websocket.py +77 -9
  31. supervisely/app/singleton.py +21 -0
  32. supervisely/app/v1/app_service.py +18 -2
  33. supervisely/app/v1/constants.py +7 -1
  34. supervisely/app/widgets/__init__.py +6 -0
  35. supervisely/app/widgets/activity_feed/__init__.py +0 -0
  36. supervisely/app/widgets/activity_feed/activity_feed.py +239 -0
  37. supervisely/app/widgets/activity_feed/style.css +78 -0
  38. supervisely/app/widgets/activity_feed/template.html +22 -0
  39. supervisely/app/widgets/card/card.py +20 -0
  40. supervisely/app/widgets/classes_list_selector/classes_list_selector.py +121 -9
  41. supervisely/app/widgets/classes_list_selector/template.html +60 -93
  42. supervisely/app/widgets/classes_mapping/classes_mapping.py +13 -12
  43. supervisely/app/widgets/classes_table/classes_table.py +1 -0
  44. supervisely/app/widgets/deploy_model/deploy_model.py +56 -35
  45. supervisely/app/widgets/ecosystem_model_selector/ecosystem_model_selector.py +1 -1
  46. supervisely/app/widgets/experiment_selector/experiment_selector.py +8 -0
  47. supervisely/app/widgets/fast_table/fast_table.py +184 -60
  48. supervisely/app/widgets/fast_table/template.html +1 -1
  49. supervisely/app/widgets/heatmap/__init__.py +0 -0
  50. supervisely/app/widgets/heatmap/heatmap.py +564 -0
  51. supervisely/app/widgets/heatmap/script.js +533 -0
  52. supervisely/app/widgets/heatmap/style.css +233 -0
  53. supervisely/app/widgets/heatmap/template.html +21 -0
  54. supervisely/app/widgets/modal/__init__.py +0 -0
  55. supervisely/app/widgets/modal/modal.py +198 -0
  56. supervisely/app/widgets/modal/template.html +10 -0
  57. supervisely/app/widgets/object_class_view/object_class_view.py +3 -0
  58. supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
  59. supervisely/app/widgets/radio_tabs/template.html +1 -0
  60. supervisely/app/widgets/select/select.py +6 -3
  61. supervisely/app/widgets/select_class/__init__.py +0 -0
  62. supervisely/app/widgets/select_class/select_class.py +363 -0
  63. supervisely/app/widgets/select_class/template.html +50 -0
  64. supervisely/app/widgets/select_cuda/select_cuda.py +22 -0
  65. supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +65 -7
  66. supervisely/app/widgets/select_tag/__init__.py +0 -0
  67. supervisely/app/widgets/select_tag/select_tag.py +352 -0
  68. supervisely/app/widgets/select_tag/template.html +64 -0
  69. supervisely/app/widgets/select_team/select_team.py +37 -4
  70. supervisely/app/widgets/select_team/template.html +4 -5
  71. supervisely/app/widgets/select_user/__init__.py +0 -0
  72. supervisely/app/widgets/select_user/select_user.py +270 -0
  73. supervisely/app/widgets/select_user/template.html +13 -0
  74. supervisely/app/widgets/select_workspace/select_workspace.py +59 -10
  75. supervisely/app/widgets/select_workspace/template.html +9 -12
  76. supervisely/app/widgets/table/table.py +68 -13
  77. supervisely/app/widgets/tree_select/tree_select.py +2 -0
  78. supervisely/aug/aug.py +6 -2
  79. supervisely/convert/base_converter.py +1 -0
  80. supervisely/convert/converter.py +2 -2
  81. supervisely/convert/image/image_converter.py +3 -1
  82. supervisely/convert/image/image_helper.py +48 -4
  83. supervisely/convert/image/label_studio/label_studio_converter.py +2 -0
  84. supervisely/convert/image/medical2d/medical2d_helper.py +2 -24
  85. supervisely/convert/image/multispectral/multispectral_converter.py +6 -0
  86. supervisely/convert/image/pascal_voc/pascal_voc_converter.py +8 -5
  87. supervisely/convert/image/pascal_voc/pascal_voc_helper.py +7 -0
  88. supervisely/convert/pointcloud/kitti_3d/kitti_3d_converter.py +33 -3
  89. supervisely/convert/pointcloud/kitti_3d/kitti_3d_helper.py +12 -5
  90. supervisely/convert/pointcloud/las/las_converter.py +13 -1
  91. supervisely/convert/pointcloud/las/las_helper.py +110 -11
  92. supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +27 -16
  93. supervisely/convert/pointcloud/pointcloud_converter.py +91 -3
  94. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +58 -22
  95. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +21 -47
  96. supervisely/convert/video/__init__.py +1 -0
  97. supervisely/convert/video/multi_view/__init__.py +0 -0
  98. supervisely/convert/video/multi_view/multi_view.py +543 -0
  99. supervisely/convert/video/sly/sly_video_converter.py +359 -3
  100. supervisely/convert/video/video_converter.py +22 -2
  101. supervisely/convert/volume/dicom/dicom_converter.py +13 -5
  102. supervisely/convert/volume/dicom/dicom_helper.py +30 -18
  103. supervisely/geometry/constants.py +1 -0
  104. supervisely/geometry/geometry.py +4 -0
  105. supervisely/geometry/helpers.py +5 -1
  106. supervisely/geometry/oriented_bbox.py +676 -0
  107. supervisely/geometry/rectangle.py +2 -1
  108. supervisely/io/env.py +76 -1
  109. supervisely/io/fs.py +21 -0
  110. supervisely/nn/benchmark/base_evaluator.py +104 -11
  111. supervisely/nn/benchmark/instance_segmentation/evaluator.py +1 -8
  112. supervisely/nn/benchmark/object_detection/evaluator.py +20 -4
  113. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +10 -5
  114. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +34 -16
  115. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +1 -1
  116. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +1 -1
  117. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +1 -1
  118. supervisely/nn/benchmark/visualization/evaluation_result.py +66 -4
  119. supervisely/nn/inference/cache.py +43 -18
  120. supervisely/nn/inference/gui/serving_gui_template.py +5 -2
  121. supervisely/nn/inference/inference.py +795 -199
  122. supervisely/nn/inference/inference_request.py +42 -9
  123. supervisely/nn/inference/predict_app/gui/classes_selector.py +83 -12
  124. supervisely/nn/inference/predict_app/gui/gui.py +676 -488
  125. supervisely/nn/inference/predict_app/gui/input_selector.py +205 -26
  126. supervisely/nn/inference/predict_app/gui/model_selector.py +2 -4
  127. supervisely/nn/inference/predict_app/gui/output_selector.py +46 -6
  128. supervisely/nn/inference/predict_app/gui/settings_selector.py +756 -59
  129. supervisely/nn/inference/predict_app/gui/tags_selector.py +1 -1
  130. supervisely/nn/inference/predict_app/gui/utils.py +236 -119
  131. supervisely/nn/inference/predict_app/predict_app.py +2 -2
  132. supervisely/nn/inference/session.py +43 -35
  133. supervisely/nn/inference/tracking/bbox_tracking.py +113 -34
  134. supervisely/nn/inference/tracking/tracker_interface.py +7 -2
  135. supervisely/nn/inference/uploader.py +139 -12
  136. supervisely/nn/live_training/__init__.py +7 -0
  137. supervisely/nn/live_training/api_server.py +111 -0
  138. supervisely/nn/live_training/artifacts_utils.py +243 -0
  139. supervisely/nn/live_training/checkpoint_utils.py +229 -0
  140. supervisely/nn/live_training/dynamic_sampler.py +44 -0
  141. supervisely/nn/live_training/helpers.py +14 -0
  142. supervisely/nn/live_training/incremental_dataset.py +146 -0
  143. supervisely/nn/live_training/live_training.py +497 -0
  144. supervisely/nn/live_training/loss_plateau_detector.py +111 -0
  145. supervisely/nn/live_training/request_queue.py +52 -0
  146. supervisely/nn/model/model_api.py +9 -0
  147. supervisely/nn/prediction_dto.py +12 -1
  148. supervisely/nn/tracker/base_tracker.py +11 -1
  149. supervisely/nn/tracker/botsort/botsort_config.yaml +0 -1
  150. supervisely/nn/tracker/botsort/tracker/mc_bot_sort.py +7 -4
  151. supervisely/nn/tracker/botsort_tracker.py +94 -65
  152. supervisely/nn/tracker/visualize.py +87 -90
  153. supervisely/nn/training/gui/classes_selector.py +16 -1
  154. supervisely/nn/training/train_app.py +28 -29
  155. supervisely/project/data_version.py +115 -51
  156. supervisely/project/download.py +1 -1
  157. supervisely/project/pointcloud_episode_project.py +37 -8
  158. supervisely/project/pointcloud_project.py +30 -2
  159. supervisely/project/project.py +14 -2
  160. supervisely/project/project_meta.py +27 -1
  161. supervisely/project/project_settings.py +32 -18
  162. supervisely/project/versioning/__init__.py +1 -0
  163. supervisely/project/versioning/common.py +20 -0
  164. supervisely/project/versioning/schema_fields.py +35 -0
  165. supervisely/project/versioning/video_schema.py +221 -0
  166. supervisely/project/versioning/volume_schema.py +87 -0
  167. supervisely/project/video_project.py +717 -15
  168. supervisely/project/volume_project.py +623 -5
  169. supervisely/template/experiment/experiment.html.jinja +4 -4
  170. supervisely/template/experiment/experiment_generator.py +14 -21
  171. supervisely/template/live_training/__init__.py +0 -0
  172. supervisely/template/live_training/header.html.jinja +96 -0
  173. supervisely/template/live_training/live_training.html.jinja +51 -0
  174. supervisely/template/live_training/live_training_generator.py +464 -0
  175. supervisely/template/live_training/sly-style.css +402 -0
  176. supervisely/template/live_training/template.html.jinja +18 -0
  177. supervisely/versions.json +28 -26
  178. supervisely/video/sampling.py +39 -20
  179. supervisely/video/video.py +40 -11
  180. supervisely/video_annotation/video_object.py +29 -4
  181. supervisely/volume/stl_converter.py +2 -0
  182. supervisely/worker_api/agent_rpc.py +24 -1
  183. supervisely/worker_api/rpc_servicer.py +31 -7
  184. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/METADATA +56 -39
  185. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/RECORD +189 -142
  186. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/WHEEL +1 -1
  187. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/entry_points.txt +0 -0
  188. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info/licenses}/LICENSE +0 -0
  189. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/top_level.txt +0 -0
supervisely/project/video_project.py
@@ -1,19 +1,24 @@
 # coding: utf-8
 
-# docs
 from __future__ import annotations
 
 import asyncio
+import io
+import json
 import os
+import tarfile
+import tempfile
 from collections import namedtuple
 from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union
 
+import zstd
 from tqdm import tqdm
 
-from supervisely._utils import batched
+from supervisely._utils import batched, logger
 from supervisely.api.api import Api
 from supervisely.api.dataset_api import DatasetInfo
 from supervisely.api.module_api import ApiField
+from supervisely.api.project_api import ProjectInfo
 from supervisely.api.video.video_api import VideoInfo
 from supervisely.collection.key_indexed_collection import KeyIndexedCollection
 from supervisely.io.fs import clean_dir, mkdir, touch, touch_async
@@ -21,8 +26,13 @@ from supervisely.io.json import dump_json_file, dump_json_file_async, load_json_file
 from supervisely.project.project import Dataset, OpenMode, Project
 from supervisely.project.project import read_single_project as read_project_wrapper
 from supervisely.project.project_meta import ProjectMeta
+from supervisely.project.project_settings import LabelingInterface
 from supervisely.project.project_type import ProjectType
-from supervisely.sly_logger import logger
+from supervisely.project.versioning.common import (
+    DEFAULT_VIDEO_SCHEMA_VERSION,
+    get_video_snapshot_schema,
+)
+from supervisely.project.versioning.schema_fields import VersionSchemaField
 from supervisely.task.progress import tqdm_sly
 from supervisely.video import video as sly_video
 from supervisely.video_annotation.key_id_map import KeyIdMap
@@ -30,11 +40,10 @@ from supervisely.video_annotation.video_annotation import VideoAnnotation
 
 
 class VideoItemPaths(NamedTuple):
-    #: :class:`str`: Full video file path of item
     video_path: str
-
-    #: :class:`str`: Full annotation file path of item
+    # Full video file path of item
     ann_path: str
+    # Full annotation file path of item
 
 
 class VideoDataset(Dataset):
@@ -63,6 +72,9 @@ class VideoDataset(Dataset):
     #: :class:`str`: Items info directory name
     item_info_dir_name = "video_info"
 
+    #: :class:`str`: Metadata directory name
+    metadata_dir_name = "metadata"
+
     #: :class:`str`: Segmentation masks directory name
     seg_dir_name = None
 
@@ -1036,7 +1048,7 @@ class VideoProject(Project):
         raise NotImplementedError(
             f"Static method 'get_train_val_splits_by_tag()' is not supported for VideoProject class now."
         )
-
+
     @staticmethod
     def get_train_val_splits_by_collections(
         project_dir: str,
@@ -1193,7 +1205,7 @@ class VideoProject(Project):
         api: Api,
         project_id: int,
         dest_dir: str,
-        semaphore: asyncio.Semaphore = None,
+        semaphore: Optional[Union[asyncio.Semaphore, int]] = None,
         dataset_ids: List[int] = None,
         download_videos: bool = True,
         save_video_info: bool = False,
@@ -1213,7 +1225,7 @@ class VideoProject(Project):
         :param dest_dir: Directory to download video project.
         :type dest_dir: :class:`str`
         :param semaphore: Semaphore to limit the number of concurrent downloads of items.
-        :type semaphore: :class:`asyncio.Semaphore`, optional
+        :type semaphore: :class:`asyncio.Semaphore` or :class:`int`, optional
        :param dataset_ids: Datasets IDs in Supervisely to download.
         :type dataset_ids: :class:`list` [ :class:`int` ], optional
         :param download_videos: Download videos from Supervisely video project in dest_dir or not.
@@ -1261,6 +1273,678 @@ class VideoProject(Project):
             **kwargs,
         )
 
+    # --------------------- #
+    # Video Data Versioning #
+    # --------------------- #
+    @staticmethod
+    def download_bin(
+        api: Api,
+        project_id: int,
+        dest_dir: Optional[str] = None,
+        dataset_ids: Optional[List[int]] = None,
+        batch_size: int = 50,
+        log_progress: bool = True,
+        progress_cb: Optional[Union[tqdm, Callable]] = None,
+        return_bytesio: bool = False,
+    ) -> Union[str, io.BytesIO]:
+        """
+        Download video project snapshot in Arrow/Parquet-based binary format.
+
+        Result is a .tar.zst archive containing:
+        - project_info.json
+        - project_meta.json
+        - key_id_map.json
+        - manifest.json
+        - datasets.parquet
+        - videos.parquet
+        - objects.parquet
+        - figures.parquet
+
+        :param api: Supervisely API client.
+        :type api: Api
+        :param project_id: Source project ID.
+        :type project_id: int
+        :param dest_dir: Directory to save the resulting ``.tar.zst`` file. Required if ``return_bytesio`` is False.
+        :type dest_dir: Optional[str]
+        :param dataset_ids: Optional list of dataset IDs to include. If provided, only those datasets (and their videos/annotations) will be included in the snapshot.
+        :type dataset_ids: Optional[List[int]]
+        :param batch_size: Batch size for downloading video annotations.
+        :type batch_size: int
+        :param log_progress: If True, shows progress (uses internal tqdm progress bars) when ``progress_cb`` is not provided.
+        :type log_progress: bool
+        :param progress_cb: Optional progress callback. Can be a ``tqdm``-like callable or a function accepting an integer increment.
+        :type progress_cb: Optional[Union[tqdm, Callable]]
+        :param return_bytesio: If True, return the snapshot as :class:`io.BytesIO`. If False, write the snapshot to ``dest_dir`` and return the output file path.
+        :type return_bytesio: bool
+        :return: Either output file path (``.tar.zst``) when ``return_bytesio`` is False, or an in-memory snapshot stream when ``return_bytesio`` is True.
+        :rtype: Union[str, io.BytesIO]
+        """
+        if dest_dir is None and not return_bytesio:
+            raise ValueError(
+                "dest_dir must be specified if return_bytesio is False in VideoProject.download_bin"
+            )
+
+        snapshot_io = VideoProject.build_snapshot(
+            api,
+            project_id=project_id,
+            dataset_ids=dataset_ids,
+            batch_size=batch_size,
+            log_progress=log_progress,
+            progress_cb=progress_cb,
+        )
+
+        if return_bytesio:
+            snapshot_io.seek(0)
+            return snapshot_io
+
+        project_info = api.project.get_info_by_id(project_id)
+        os.makedirs(dest_dir, exist_ok=True)
+        out_path = os.path.join(
+            dest_dir,
+            f"{project_info.id}_{project_info.name}.tar.zst",
+        )
+        with open(out_path, "wb") as dst:
+            dst.write(snapshot_io.read())
+        return out_path
+
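A minimal usage sketch for the new `download_bin` API follows (the project ID and paths are placeholders, and `sly.Api.from_env()` assumes server address and token are set in the environment):

    import supervisely as sly
    from supervisely.project.video_project import VideoProject

    api = sly.Api.from_env()

    # Write the snapshot archive to disk and get its path back...
    path = VideoProject.download_bin(api, project_id=123, dest_dir="/tmp/snapshots")
    print(path)  # e.g. /tmp/snapshots/123_<project_name>.tar.zst

    # ...or keep the whole snapshot in memory as a BytesIO stream.
    buf = VideoProject.download_bin(api, project_id=123, return_bytesio=True)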
+    @staticmethod
+    def upload_bin(
+        api: Api,
+        file: Union[str, io.BytesIO],
+        workspace_id: int,
+        project_name: Optional[str] = None,
+        with_custom_data: bool = True,
+        log_progress: bool = True,
+        progress_cb: Optional[Union[tqdm, Callable]] = None,
+        skip_missed: bool = False,
+    ) -> "ProjectInfo":
+        """
+        Restore a video project from an Arrow/Parquet-based binary snapshot.
+
+        :param api: Supervisely API client.
+        :type api: Api
+        :param file: Snapshot file path (``.tar.zst``) or in-memory snapshot stream.
+        :type file: Union[str, io.BytesIO]
+        :param workspace_id: Target workspace ID where the project will be created.
+        :type workspace_id: int
+        :param project_name: Optional new project name. If not provided, the name from the snapshot will be used. If the name already exists in the workspace, a free name will be chosen.
+        :type project_name: Optional[str]
+        :param with_custom_data: If True, restore project/dataset/video custom data (when present in the snapshot).
+        :type with_custom_data: bool
+        :param log_progress: If True, shows progress (uses internal tqdm progress bars) when ``progress_cb`` is not provided.
+        :type log_progress: bool
+        :param progress_cb: Optional progress callback. Can be a ``tqdm``-like callable or a function accepting an integer increment.
+        :type progress_cb: Optional[Union[tqdm, Callable]]
+        :param skip_missed: If True, skip videos that are missing on server when restoring by hash.
+        :type skip_missed: bool
+        :return: Info of the newly created project.
+        :rtype: ProjectInfo
+        """
+        if isinstance(file, io.BytesIO):
+            snapshot_bytes = file.getvalue()
+        else:
+            with open(file, "rb") as f:
+                snapshot_bytes = f.read()
+
+        return VideoProject.restore_snapshot(
+            api,
+            snapshot_bytes=snapshot_bytes,
+            workspace_id=workspace_id,
+            project_name=project_name,
+            with_custom_data=with_custom_data,
+            log_progress=log_progress,
+            progress_cb=progress_cb,
+            skip_missed=skip_missed,
+        )
+
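Continuing the sketch above, `upload_bin` restores a snapshot into a (possibly different) workspace; all IDs and names are placeholders:

    restored = VideoProject.upload_bin(
        api,
        file="/tmp/snapshots/123_my_project.tar.zst",  # or an io.BytesIO with the archive bytes
        workspace_id=456,
        project_name="my_project_restored",  # optional; a free name is chosen on collision
        skip_missed=True,  # skip hash-uploaded videos that are gone from storage
    )
    print(restored.id, restored.name)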
+    @staticmethod
+    def build_snapshot(
+        api: Api,
+        project_id: int,
+        dataset_ids: Optional[List[int]] = None,
+        batch_size: int = 50,
+        log_progress: bool = True,
+        progress_cb: Optional[Union[tqdm, Callable]] = None,
+        schema_version: str = DEFAULT_VIDEO_SCHEMA_VERSION,
+    ) -> io.BytesIO:
+        """
+        Create a video project snapshot in Arrow/Parquet+tar.zst format and return it as BytesIO.
+        """
+        try:
+            import pyarrow  # pylint: disable=import-error
+            import pyarrow.parquet as parquet  # pylint: disable=import-error
+        except Exception as e:
+            raise RuntimeError(
+                "pyarrow is required to build video snapshot. Please install pyarrow."
+            ) from e
+
+        project_info = api.project.get_info_by_id(project_id)
+        meta = ProjectMeta.from_json(api.project.get_meta(project_id, with_settings=True))
+        key_id_map = KeyIdMap()
+        snapshot_schema = get_video_snapshot_schema(schema_version)
+
+        tmp_root = tempfile.mkdtemp()
+        payload_dir = os.path.join(tmp_root, "payload")
+        mkdir(payload_dir)
+
+        try:
+            # project_info / meta
+            proj_info_path = os.path.join(payload_dir, "project_info.json")
+            dump_json_file(project_info._asdict(), proj_info_path)
+
+            proj_meta_path = os.path.join(payload_dir, "project_meta.json")
+            dump_json_file(meta.to_json(), proj_meta_path)
+
+            datasets_rows: List[dict] = []
+            videos_rows: List[dict] = []
+            objects_rows: List[dict] = []
+            figures_rows: List[dict] = []
+
+            dataset_ids_filter = set(dataset_ids) if dataset_ids is not None else None
+
+            # api.dataset.tree() doesn't include custom_data
+            ds_custom_data_by_id: Dict[int, dict] = {}
+            try:
+                for ds in api.dataset.get_list(
+                    project_id, recursive=True, include_custom_data=True
+                ):
+                    if getattr(ds, "custom_data", None) is not None:
+                        ds_custom_data_by_id[ds.id] = ds.custom_data
+            except Exception:
+                ds_custom_data_by_id = {}
+
+            for parents, ds_info in api.dataset.tree(project_id):
+                if dataset_ids_filter is not None and ds_info.id not in dataset_ids_filter:
+                    continue
+
+                full_path = Dataset._get_dataset_path(ds_info.name, parents)
+                ds_custom_data = ds_custom_data_by_id.get(ds_info.id)
+                datasets_rows.append(
+                    snapshot_schema.dataset_row_from_ds_info(
+                        ds_info, full_path=full_path, custom_data=ds_custom_data
+                    )
+                )
+
+                videos = api.video.get_list(ds_info.id)
+                ds_progress = progress_cb
+                if log_progress and progress_cb is None:
+                    ds_progress = tqdm_sly(
+                        desc=f"Collecting videos from '{ds_info.name}'",
+                        total=len(videos),
+                    )
+
+                for batch in batched(videos, batch_size):
+                    video_ids = [v.id for v in batch]
+                    ann_jsons = api.video.annotation.download_bulk(ds_info.id, video_ids)
+
+                    for video_info, ann_json in zip(batch, ann_jsons):
+                        if video_info.name != ann_json[ApiField.VIDEO_NAME]:
+                            raise RuntimeError(
+                                "Error in api.video.annotation.download_bulk: broken order"
+                            )
+
+                        videos_rows.append(
+                            snapshot_schema.video_row_from_video_info(
+                                video_info, src_dataset_id=ds_info.id, ann_json=ann_json
+                            )
+                        )
+
+                        video_ann = VideoAnnotation.from_json(ann_json, meta, key_id_map)
+                        obj_key_to_src_id: Dict[str, int] = {}
+                        for obj in video_ann.objects:
+                            src_obj_id = len(objects_rows) + 1
+                            obj_key_to_src_id[obj.key().hex] = src_obj_id
+                            objects_rows.append(
+                                snapshot_schema.object_row_from_object(
+                                    obj, src_object_id=src_obj_id, src_video_id=video_info.id
+                                )
+                            )
+
+                        for frame in video_ann.frames:
+                            for fig in frame.figures:
+                                parent_key = fig.parent_object.key().hex
+                                src_obj_id = obj_key_to_src_id.get(parent_key)
+                                if src_obj_id is None:
+                                    logger.warning(
+                                        f"Figure parent object with key '{parent_key}' "
+                                        f"not found in objects for video '{video_info.name}'"
+                                    )
+                                    continue
+                                figures_rows.append(
+                                    snapshot_schema.figure_row_from_figure(
+                                        fig,
+                                        figure_row_idx=len(figures_rows),
+                                        src_object_id=src_obj_id,
+                                        src_video_id=video_info.id,
+                                        frame_index=frame.index,
+                                    )
+                                )
+
+                    if ds_progress is not None:
+                        ds_progress(len(batch))
+
+            # key_id_map.json
+            key_id_map_path = os.path.join(payload_dir, "key_id_map.json")
+            key_id_map.dump_json(key_id_map_path)
+
+            # Arrow schemas
+            tables_meta = []
+            datasets_schema = snapshot_schema.datasets_schema(pyarrow)
+            videos_schema = snapshot_schema.videos_schema(pyarrow)
+            objects_schema = snapshot_schema.objects_schema(pyarrow)
+            figures_schema = snapshot_schema.figures_schema(pyarrow)
+
+            if datasets_rows:
+                ds_table = pyarrow.Table.from_pylist(datasets_rows, schema=datasets_schema)
+                ds_path = os.path.join(payload_dir, "datasets.parquet")
+                parquet.write_table(ds_table, ds_path)
+                tables_meta.append(
+                    {
+                        "name": "datasets",
+                        "path": "datasets.parquet",
+                        "row_count": ds_table.num_rows,
+                    }
+                )
+
+            if videos_rows:
+                v_table = pyarrow.Table.from_pylist(videos_rows, schema=videos_schema)
+                v_path = os.path.join(payload_dir, "videos.parquet")
+                parquet.write_table(v_table, v_path)
+                tables_meta.append(
+                    {
+                        "name": "videos",
+                        "path": "videos.parquet",
+                        "row_count": v_table.num_rows,
+                    }
+                )
+
+            if objects_rows:
+                o_table = pyarrow.Table.from_pylist(objects_rows, schema=objects_schema)
+                o_path = os.path.join(payload_dir, "objects.parquet")
+                parquet.write_table(o_table, o_path)
+                tables_meta.append(
+                    {
+                        "name": "objects",
+                        "path": "objects.parquet",
+                        "row_count": o_table.num_rows,
+                    }
+                )
+
+            if figures_rows:
+                f_table = pyarrow.Table.from_pylist(figures_rows, schema=figures_schema)
+                f_path = os.path.join(payload_dir, "figures.parquet")
+                parquet.write_table(f_table, f_path)
+                tables_meta.append(
+                    {
+                        "name": "figures",
+                        "path": "figures.parquet",
+                        "row_count": f_table.num_rows,
+                    }
+                )
+
+            manifest = {
+                VersionSchemaField.SCHEMA_VERSION: schema_version,
+                VersionSchemaField.TABLES: tables_meta,
+            }
+            manifest_path = os.path.join(payload_dir, "manifest.json")
+            dump_json_file(manifest, manifest_path)
+
+            tar_path = os.path.join(tmp_root, "snapshot.tar")
+            with tarfile.open(tar_path, "w") as tar:
+                tar.add(payload_dir, arcname=".")
+
+            chunk_size = 1024 * 1024 * 50  # 50 MiB
+            zst_path = os.path.join(tmp_root, "snapshot.tar.zst")
+            # Try streaming compression first, fallback to single-shot
+            try:
+                cctx = zstd.ZstdCompressor()
+                with open(tar_path, "rb") as src, open(zst_path, "wb") as dst:
+                    try:
+                        stream = cctx.stream_writer(dst, closefd=False)
+                    except TypeError:
+                        stream = cctx.stream_writer(dst)
+                    with stream as compressor:
+                        while True:
+                            chunk = src.read(chunk_size)
+                            if not chunk:
+                                break
+                            compressor.write(chunk)
+            # Fallback: single-shot compression
+            except Exception:
+                with open(tar_path, "rb") as src, open(zst_path, "wb") as dst:
+                    dst.write(zstd.compress(src.read()))
+
+            with open(zst_path, "rb") as f:
+                outio = io.BytesIO(f.read())
+            outio.seek(0)
+            return outio
+
+        finally:
+            try:
+                clean_dir(tmp_root)
+            except Exception:
+                pass
+
+ @staticmethod
1629
+ def restore_snapshot(
1630
+ api: Api,
1631
+ snapshot_bytes: bytes,
1632
+ workspace_id: int,
1633
+ project_name: Optional[str] = None,
1634
+ with_custom_data: bool = True,
1635
+ log_progress: bool = True,
1636
+ progress_cb: Optional[Union[tqdm, Callable]] = None,
1637
+ skip_missed: bool = False,
1638
+ ) -> ProjectInfo:
1639
+ """
1640
+ Restore a video project from a snapshot and return ProjectInfo.
1641
+ """
1642
+ try:
1643
+ import pyarrow # pylint: disable=import-error
1644
+ import pyarrow.parquet as parquet # pylint: disable=import-error
1645
+ except Exception as e:
1646
+ raise RuntimeError(
1647
+ "pyarrow is required to restore video snapshot. Please install pyarrow."
1648
+ ) from e
1649
+
1650
+ tmp_root = tempfile.mkdtemp()
1651
+ payload_dir = os.path.join(tmp_root, "payload")
1652
+ mkdir(payload_dir)
1653
+
1654
+ try:
1655
+ try:
1656
+ dctx = zstd.ZstdDecompressor()
1657
+ with dctx.stream_reader(io.BytesIO(snapshot_bytes)) as reader:
1658
+ with tarfile.open(fileobj=reader, mode="r|") as tar:
1659
+ tar.extractall(payload_dir)
1660
+ except Exception:
1661
+ tar_bytes = zstd.decompress(snapshot_bytes)
1662
+ with tarfile.open(fileobj=io.BytesIO(tar_bytes), mode="r") as tar:
1663
+ tar.extractall(payload_dir)
1664
+
1665
+ proj_info_path = os.path.join(payload_dir, "project_info.json")
1666
+ proj_meta_path = os.path.join(payload_dir, "project_meta.json")
1667
+ key_id_map_path = os.path.join(payload_dir, "key_id_map.json")
1668
+ manifest_path = os.path.join(payload_dir, "manifest.json")
1669
+
1670
+ project_info_json = load_json_file(proj_info_path)
1671
+ meta_json = load_json_file(proj_meta_path)
1672
+ manifest = load_json_file(manifest_path)
1673
+
1674
+ meta = ProjectMeta.from_json(meta_json)
1675
+ _ = KeyIdMap().load_json(key_id_map_path)
1676
+
1677
+ schema_version = manifest.get(VersionSchemaField.SCHEMA_VERSION) or manifest.get(
1678
+ "schema_version"
1679
+ )
1680
+ try:
1681
+ _ = get_video_snapshot_schema(schema_version)
1682
+ except Exception:
1683
+ raise RuntimeError(
1684
+ f"Unsupported video snapshot schema_version: {schema_version}"
1685
+ )
1686
+
1687
+ src_project_name = project_info_json.get("name")
1688
+ src_project_desc = project_info_json.get("description")
1689
+ src_project_readme = project_info_json.get("readme")
1690
+ if project_name is None:
1691
+ project_name = src_project_name
1692
+
1693
+ if api.project.exists(workspace_id, project_name):
1694
+ project_name = api.project.get_free_name(workspace_id, project_name)
1695
+
1696
+ project = api.project.create(
1697
+ workspace_id,
1698
+ project_name,
1699
+ ProjectType.VIDEOS,
1700
+ src_project_desc,
1701
+ readme=src_project_readme,
1702
+ )
1703
+ new_meta = api.project.update_meta(project.id, meta.to_json())
1704
+
1705
+ if with_custom_data:
1706
+ src_custom_data = project_info_json.get("custom_data") or {}
1707
+ try:
1708
+ api.project.update_custom_data(project.id, src_custom_data, silent=True)
1709
+ except Exception:
1710
+ logger.warning("Failed to restore project custom_data from snapshot")
1711
+
1712
+ if progress_cb is not None:
1713
+ log_progress = False
1714
+
1715
+ # Datasets
1716
+ ds_rows = []
1717
+ datasets_path = os.path.join(payload_dir, "datasets.parquet")
1718
+ if os.path.exists(datasets_path):
1719
+ ds_table = parquet.read_table(datasets_path)
1720
+ ds_rows = ds_table.to_pylist()
1721
+
1722
+ ds_rows.sort(
1723
+ key=lambda r: (r["parent_src_dataset_id"] is not None, r["parent_src_dataset_id"])
1724
+ )
1725
+
1726
+ dataset_mapping: Dict[int, DatasetInfo] = {}
1727
+ for row in ds_rows:
1728
+ src_ds_id = row["src_dataset_id"]
1729
+ parent_src_id = row["parent_src_dataset_id"]
1730
+ if parent_src_id is not None:
1731
+ parent_ds = dataset_mapping.get(parent_src_id)
1732
+ parent_id = parent_ds.id if parent_ds is not None else None
1733
+ else:
1734
+ parent_id = None
1735
+
1736
+ custom_data = None
1737
+ if with_custom_data:
1738
+ raw_cd = row.get("custom_data")
1739
+ if isinstance(raw_cd, str) and raw_cd.strip():
1740
+ try:
1741
+ custom_data = json.loads(raw_cd)
1742
+ except Exception:
1743
+ logger.warning(
1744
+ f"Failed to parse dataset custom_data for '{row.get('name')}', skipping it."
1745
+ )
1746
+ elif isinstance(raw_cd, dict):
1747
+ custom_data = raw_cd
1748
+
1749
+ ds = api.dataset.create(
1750
+ project.id,
1751
+ name=row["name"],
1752
+ description=row["description"],
1753
+ parent_id=parent_id,
1754
+ custom_data=custom_data,
1755
+ )
1756
+ if with_custom_data and custom_data is not None:
1757
+ try:
1758
+ api.dataset.update_custom_data(ds.id, custom_data)
1759
+ except Exception:
1760
+ logger.warning(f"Failed to restore custom_data for dataset '{row.get('name')}'")
1761
+ dataset_mapping[src_ds_id] = ds
1762
+
1763
+ # Videos
1764
+ v_rows = []
1765
+ videos_path = os.path.join(payload_dir, "videos.parquet")
1766
+ if os.path.exists(videos_path):
1767
+ v_table = parquet.read_table(videos_path)
1768
+ v_rows = v_table.to_pylist()
1769
+
1770
+ videos_by_dataset: Dict[int, List[dict]] = {}
1771
+ for row in v_rows:
1772
+ src_ds_id = row["src_dataset_id"]
1773
+ videos_by_dataset.setdefault(src_ds_id, []).append(row)
1774
+
1775
+ src_to_new_video: Dict[int, VideoInfo] = {}
1776
+
1777
+ for src_ds_id, rows in videos_by_dataset.items():
1778
+ ds_info = dataset_mapping.get(src_ds_id)
1779
+ if ds_info is None:
1780
+ logger.warning(
1781
+ f"Dataset with src id={src_ds_id} not found in mapping. "
1782
+ f"Skipping its videos."
1783
+ )
1784
+ continue
1785
+
1786
+ dataset_id = ds_info.id
1787
+ hashed_rows = [r for r in rows if r.get("hash")]
1788
+ link_rows = [r for r in rows if not r.get("hash") and r.get("link")]
1789
+
1790
+ ds_progress = progress_cb
1791
+ if log_progress and progress_cb is None:
1792
+ ds_progress = tqdm_sly(
1793
+ desc=f"Uploading videos to '{ds_info.name}'",
1794
+ total=len(rows),
1795
+ )
1796
+
1797
+ if hashed_rows:
1798
+ if skip_missed:
1799
+ existing_hashes = api.video.check_existing_hashes(
1800
+ list({r["hash"] for r in hashed_rows})
1801
+ )
1802
+ kept_hashed_rows = [r for r in hashed_rows if r["hash"] in existing_hashes]
1803
+ if not kept_hashed_rows:
1804
+ logger.warning(
1805
+ f"All hashed videos for dataset '{ds_info.name}' "
1806
+ f"are missing on server; nothing to upload."
1807
+ )
1808
+ hashed_rows = kept_hashed_rows
1809
+
1810
+ hashes = [r["hash"] for r in hashed_rows]
1811
+ names = [r["name"] for r in hashed_rows]
1812
+ metas: List[dict] = []
1813
+ for r in hashed_rows:
1814
+ meta_dict: dict = {}
1815
+ if r.get("meta"):
1816
+ try:
1817
+ meta_dict.update(json.loads(r["meta"]))
1818
+ except Exception:
1819
+ pass
1820
+ metas.append(meta_dict)
1821
+
1822
+ if hashes:
1823
+ new_infos = api.video.upload_hashes(
1824
+ dataset_id,
1825
+ names=names,
1826
+ hashes=hashes,
1827
+ metas=metas,
1828
+ progress_cb=ds_progress,
1829
+ )
1830
+ for row, new_info in zip(hashed_rows, new_infos):
1831
+ src_to_new_video[row["src_video_id"]] = new_info
1832
+ if with_custom_data and row.get("custom_data"):
1833
+ try:
1834
+ cd = json.loads(row["custom_data"])
1835
+ api.video.update_custom_data(new_info.id, cd)
1836
+ except Exception:
1837
+ logger.warning(
1838
+ f"Failed to restore custom_data for video '{new_info.name}'"
1839
+ )
1840
+
1841
+ if link_rows:
1842
+ links = [r["link"] for r in link_rows]
1843
+ names = [r["name"] for r in link_rows]
1844
+ metas: List[dict] = []
1845
+ for r in link_rows:
1846
+ meta_dict: dict = {}
1847
+ if r.get("meta"):
1848
+ try:
1849
+ meta_dict.update(json.loads(r["meta"]))
1850
+ except Exception:
1851
+ pass
1852
+ metas.append(meta_dict)
1853
+
1854
+ new_infos_links = api.video.upload_links(
1855
+ dataset_id,
1856
+ links=links,
1857
+ names=names,
1858
+ metas=metas,
1859
+ progress_cb=ds_progress,
1860
+ )
1861
+ for row, new_info in zip(link_rows, new_infos_links):
1862
+ src_to_new_video[row["src_video_id"]] = new_info
1863
+ if with_custom_data and row.get("custom_data"):
1864
+ try:
1865
+ cd = json.loads(row["custom_data"])
1866
+ api.video.update_custom_data(new_info.id, cd)
1867
+ except Exception:
1868
+ logger.warning(
1869
+ f"Failed to restore custom_data for video '{new_info.name}'"
1870
+ )
1871
+
1872
+ if ds_progress is not None:
1873
+ ds_progress(len(rows))
1874
+
1875
+ # Annotations
1876
+ ann_temp_dir = os.path.join(tmp_root, "anns")
1877
+ mkdir(ann_temp_dir)
1878
+
1879
+ anns_by_dataset: Dict[int, List[Tuple[int, str]]] = {}
1880
+ for row in v_rows:
1881
+ src_vid = row["src_video_id"]
1882
+ new_info = src_to_new_video.get(src_vid)
1883
+ if new_info is None:
1884
+ continue
1885
+ src_ds_id = row["src_dataset_id"]
1886
+ anns_by_dataset.setdefault(src_ds_id, []).append((new_info.id, row["ann_json"]))
1887
+
1888
+ for src_ds_id, items in anns_by_dataset.items():
1889
+ ds_info = dataset_mapping.get(src_ds_id)
1890
+ if ds_info is None:
1891
+ continue
1892
+
1893
+ video_ids: List[int] = []
1894
+ ann_paths: List[str] = []
1895
+
1896
+ for vid_id, ann_json_str in items:
1897
+ video_ids.append(vid_id)
1898
+ ann_path = os.path.join(ann_temp_dir, f"{vid_id}.json")
1899
+ try:
1900
+ parsed = json.loads(ann_json_str)
1901
+ except Exception:
1902
+ logger.warning(
1903
+ f"Failed to parse ann_json for restored video id={vid_id}, "
1904
+ f"skipping its annotation."
1905
+ )
1906
+ continue
1907
+ dump_json_file(parsed, ann_path)
1908
+ ann_paths.append(ann_path)
1909
+
1910
+ if not video_ids:
1911
+ continue
1912
+
1913
+ anns_progress = progress_cb
1914
+ if log_progress and progress_cb is None:
1915
+ anns_progress = tqdm_sly(
1916
+ desc=f"Uploading annotations to '{ds_info.name}'",
1917
+ total=len(video_ids),
1918
+ leave=False,
1919
+ )
1920
+ for vid_id, ann_path in zip(video_ids, ann_paths):
1921
+ try:
1922
+ ann_json = load_json_file(ann_path)
1923
+ ann = VideoAnnotation.from_json(
1924
+ ann_json,
1925
+ new_meta,
1926
+ key_id_map=KeyIdMap(),
1927
+ )
1928
+ except Exception as e:
1929
+ logger.warning(
1930
+ f"Failed to deserialize annotation for restored video id={vid_id}: {e}"
1931
+ )
1932
+ continue
1933
+
1934
+ api.video.annotation.append(vid_id, ann)
1935
+ if anns_progress is not None:
1936
+ anns_progress(1)
1937
+
1938
+ return project
1939
+
1940
+ finally:
1941
+ try:
1942
+ clean_dir(tmp_root)
1943
+ except Exception:
1944
+ pass
1945
+
1946
+ # --------------------- #
1947
+
1264
1948
 
1265
1949
  def download_video_project(
1266
1950
  api: Api,
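One detail worth noting in `restore_snapshot` above: datasets are recreated parent-first using the sort key `(parent_src_dataset_id is not None, parent_src_dataset_id)`. Root rows sort first because `False < True`, and nested rows follow ordered by parent id, which yields a valid creation order provided source ids grow with creation time (a parent is always created, and thus assigned an id, before its children). A tiny illustration with made-up rows:

    rows = [
        {"src_dataset_id": 12, "parent_src_dataset_id": 10},
        {"src_dataset_id": 10, "parent_src_dataset_id": None},  # root dataset
        {"src_dataset_id": 11, "parent_src_dataset_id": 10},
    ]
    rows.sort(key=lambda r: (r["parent_src_dataset_id"] is not None, r["parent_src_dataset_id"]))
    # -> root 10 first, then 12 and 11, which reference the already-created parent 10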
@@ -1492,11 +2176,18 @@ def upload_video_project(
     if project_name is None:
         project_name = project_fs.name
 
+    is_multiview = False
+    try:
+        if project_fs.meta.labeling_interface == LabelingInterface.MULTIVIEW:
+            is_multiview = True
+    except AttributeError:
+        is_multiview = False
+
     if api.project.exists(workspace_id, project_name):
         project_name = api.project.get_free_name(workspace_id, project_name)
 
     project = api.project.create(workspace_id, project_name, ProjectType.VIDEOS)
-    api.project.update_meta(project.id, project_fs.meta.to_json())
+    project_meta = api.project.update_meta(project.id, project_fs.meta.to_json())
 
     if progress_cb is not None:
         log_progress = False
@@ -1564,7 +2255,14 @@ def upload_video_project(
                 leave=False,
             )
         try:
-            api.video.annotation.upload_paths(video_ids, ann_paths, project_fs.meta, anns_progress)
+            if is_multiview:
+                api.video.annotation.upload_paths_multiview(
+                    video_ids, ann_paths, project_meta, anns_progress
+                )
+            else:
+                api.video.annotation.upload_paths(
+                    video_ids, ann_paths, project_fs.meta, anns_progress
+                )
         except Exception as e:
             logger.info(
                 "INFO FOR DEBUGGING",
@@ -1584,7 +2282,7 @@ async def download_video_project_async(
     api: Api,
     project_id: int,
     dest_dir: str,
-    semaphore: Optional[asyncio.Semaphore] = None,
+    semaphore: Optional[Union[asyncio.Semaphore, int]] = None,
     dataset_ids: Optional[List[int]] = None,
     download_videos: Optional[bool] = True,
     save_video_info: Optional[bool] = False,
@@ -1604,7 +2302,7 @@ async def download_video_project_async(
     :param dest_dir: Destination path to local directory.
     :type dest_dir: str
     :param semaphore: Semaphore to limit the number of simultaneous downloads of items.
-    :type semaphore: asyncio.Semaphore, optional
+    :type semaphore: asyncio.Semaphore or int, optional
     :param dataset_ids: Specified list of Dataset IDs which will be downloaded. Datasets could be downloaded from different projects but with the same data type.
     :type dataset_ids: list(int), optional
     :param download_videos: Include videos in the download.
@@ -1643,6 +2341,8 @@ async def download_video_project_async(
     """
     if semaphore is None:
        semaphore = api.get_default_semaphore()
+    elif isinstance(semaphore, int):
+        semaphore = asyncio.Semaphore(semaphore)
 
     key_id_map = KeyIdMap()
 
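With this change the async download helpers accept either a ready `asyncio.Semaphore` or a plain integer concurrency limit. A minimal sketch (the project ID and path are placeholders):

    import asyncio
    import supervisely as sly
    from supervisely.project.video_project import download_video_project_async

    async def main() -> None:
        api = sly.Api.from_env()
        # Equivalent calls: pass a Semaphore, or just the limit as an int.
        await download_video_project_async(api, 123, "/tmp/proj", semaphore=asyncio.Semaphore(5))
        await download_video_project_async(api, 123, "/tmp/proj", semaphore=5)

    asyncio.run(main())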
@@ -1706,7 +2406,6 @@ async def download_video_project_async(
 
     project_fs.set_key_id_map(key_id_map)
 
-
 def _log_warning(
     video: VideoInfo,
     video_file_path: Optional[str] = None,
@@ -1734,7 +2433,7 @@
 async def _download_project_item_async(
     api: Api,
     video: VideoInfo,
-    semaphore: asyncio.Semaphore,
+    semaphore: Union[asyncio.Semaphore, int],
     dataset: DatasetInfo,
     dest_dir: str,
     project_fs: Project,
@@ -1749,6 +2448,9 @@ async def _download_project_item_async(
     This function downloads a video item from the project in Supervisely platform asynchronously.
     """
 
+    if isinstance(semaphore, int):
+        semaphore = asyncio.Semaphore(semaphore)
+
     try:
         ann_json = await api.video.annotation.download_async(video.id, video, semaphore=semaphore)
         ann_json = ann_json[0]