supervisely 6.73.221__py3-none-any.whl → 6.73.223__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -1,15 +1,24 @@
 from __future__ import annotations

 import os
+from pathlib import Path
 from typing import Dict, List, Optional, Tuple, Union

 from tqdm import tqdm

-from supervisely._utils import is_production
+from supervisely._utils import batched, is_production
 from supervisely.annotation.annotation import Annotation
 from supervisely.annotation.tag_meta import TagValueType
 from supervisely.api.api import Api
-from supervisely.io.fs import get_file_ext, get_file_name_with_ext
+from supervisely.io.env import team_id
+from supervisely.io.fs import (
+    get_file_ext,
+    get_file_name_with_ext,
+    is_archive,
+    remove_dir,
+    silent_remove,
+    unpack_archive,
+)
 from supervisely.project.project_meta import ProjectMeta
 from supervisely.project.project_settings import LabelingInterface
 from supervisely.sly_logger import logger
@@ -145,12 +154,16 @@ class BaseConverter:
         remote_files_map: Optional[Dict[str, str]] = None,
     ):
         self._input_data: str = input_data
-        self._items: List[self.BaseItem] = []
+        self._items: List[BaseConverter.BaseItem] = []
         self._meta: ProjectMeta = None
         self._labeling_interface = labeling_interface or LabelingInterface.DEFAULT
+
+        # import as links settings
         self._upload_as_links: bool = upload_as_links
         self._remote_files_map: Optional[Dict[str, str]] = remote_files_map
         self._supports_links = False  # if converter supports uploading by links
+        self._api = Api.from_env() if self._upload_as_links else None
+        self._team_id = team_id() if self._upload_as_links else None
         self._converter = None

         if self._labeling_interface not in LabelingInterface.values():
@@ -419,3 +432,81 @@ class BaseConverter:

             return meta1.clone(project_settings=new_settings)
         return meta1
+
+    def _download_remote_ann_files(self) -> None:
+        """
+        Download all annotation files from Cloud Storage to the local storage.
+        Needed to detect annotation format if "upload_as_links" is enabled.
+        """
+        if not self.upload_as_links:
+            return
+
+        ann_archives = {l: r for l, r in self._remote_files_map.items() if is_archive(l)}
+
+        anns_to_download = {
+            l: r for l, r in self._remote_files_map.items() if get_file_ext(l) == self.ann_ext
+        }
+        if not anns_to_download and not ann_archives:
+            return
+
+        import asyncio
+
+        for files_type, files in {
+            "annotations": anns_to_download,
+            "archives": ann_archives,
+        }.items():
+            if not files:
+                continue
+
+            is_archive_type = files_type == "archives"
+
+            file_size = None
+            if is_archive_type:
+                logger.info(f"Remote archives detected.")
+                file_size = sum(
+                    self._api.storage.get_info_by_path(self._team_id, remote_path).sizeb
+                    for remote_path in files.values()
+                )
+
+            loop = asyncio.get_event_loop()
+            _, progress_cb = self.get_progress(
+                len(files) if not is_archive_type else file_size,
+                f"Downloading {files_type} from remote storage",
+                is_size=is_archive_type,
+            )
+
+            for local_path in files.keys():
+                silent_remove(local_path)
+
+            logger.info(f"Downloading {files_type} from remote storage...")
+            loop.run_until_complete(
+                self._api.storage.download_bulk_async(
+                    team_id=self._team_id,
+                    remote_paths=list(files.values()),
+                    local_save_paths=list(files.keys()),
+                    progress_cb=progress_cb,
+                    progress_cb_type="number" if not is_archive_type else "size",
+                )
+            )
+            logger.info("Possible annotations downloaded successfully.")
+
+            if is_archive_type:
+                for local_path in files.keys():
+                    parent_dir = Path(local_path).parent
+                    if parent_dir.name == "ann":
+                        target_dir = parent_dir
+                    else:
+                        target_dir = parent_dir / "ann"
+                        target_dir.mkdir(parents=True, exist_ok=True)

+                    unpack_archive(local_path, str(target_dir))
+                    silent_remove(local_path)
+
+                    dirs = [d for d in target_dir.iterdir() if d.is_dir()]
+                    files = [f for f in target_dir.iterdir() if f.is_file()]
+                    if len(dirs) == 1 and len(files) == 0:
+                        for file in dirs[0].iterdir():
+                            file.rename(target_dir / file.name)
+                        remove_dir(str(dirs[0]))
+
+                    logger.info(f"Archive {local_path} unpacked successfully to {str(target_dir)}")
@@ -1,15 +1,16 @@
 import os
 from pathlib import Path
+from typing import Optional

 from tqdm import tqdm

-from typing import Literal, Optional
-
 from supervisely._utils import is_production
 from supervisely.api.api import Api
 from supervisely.app import get_data_dir
 from supervisely.convert.image.csv.csv_converter import CSVConverter
-from supervisely.convert.image.high_color.high_color_depth import HighColorDepthImageConverter
+from supervisely.convert.image.high_color.high_color_depth import (
+    HighColorDepthImageConverter,
+)
 from supervisely.convert.image.image_converter import ImageConverter
 from supervisely.convert.pointcloud.pointcloud_converter import PointcloudConverter
 from supervisely.convert.pointcloud_episodes.pointcloud_episodes_converter import (
@@ -28,10 +29,10 @@ from supervisely.io.fs import (
     touch,
     unpack_archive,
 )
+from supervisely.project.project_settings import LabelingInterface
 from supervisely.project.project_type import ProjectType
 from supervisely.sly_logger import logger
 from supervisely.task.progress import Progress
-from supervisely.project.project_settings import LabelingInterface


 class ImportManager:
@@ -164,7 +165,7 @@ class ImportManager:
         dir_path = remote_path.rstrip("/") if is_dir else os.path.dirname(remote_path)
         dir_name = os.path.basename(dir_path)

-        local_path = os.path.join(get_data_dir(), dir_name)
+        local_path = os.path.abspath(os.path.join(get_data_dir(), dir_name))
         mkdir(local_path, remove_content_if_exists=True)

         if is_dir:
@@ -172,17 +173,22 @@ class ImportManager:
         else:
             files = [self._api.storage.get_info_by_path(self._team_id, remote_path)]

+        unique_directories = set()
         for file in files:
             new_path = file.path.replace(dir_path, local_path)
             self._remote_files_map[new_path] = file.path
             Path(new_path).parent.mkdir(parents=True, exist_ok=True)
+            unique_directories.add(str(Path(file.path).parent))
             touch(new_path)

+        logger.info(f"Scanned remote directories:\n - " + "\n - ".join(unique_directories))
         return local_path

     def _unpack_archives(self, local_path):
         """Unpack if input data contains an archive."""

+        if self._upload_as_links:
+            return
         new_paths_to_scan = [local_path]
         while len(new_paths_to_scan) > 0:
             archives = []
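
The placeholder-mirroring above is the core of link-based imports: nothing is downloaded, but empty local stand-ins are touched so the format detectors can see the directory layout, while _remote_files_map remembers where each placeholder really lives. A stdlib-only sketch of the idea, with made-up paths:

import tempfile
from pathlib import Path

remote_dir = "/bucket/proj"
remote_files = [
    "/bucket/proj/ds1/img/0001.jpg",
    "/bucket/proj/ds1/ann/0001.jpg.json",
]
local_dir = Path(tempfile.mkdtemp())

remote_files_map = {}
unique_directories = set()
for remote_path in remote_files:
    # Mirror the remote tree locally and create an empty placeholder file.
    local_path = Path(remote_path.replace(remote_dir, str(local_dir), 1))
    remote_files_map[str(local_path)] = remote_path
    local_path.parent.mkdir(parents=True, exist_ok=True)
    unique_directories.add(str(Path(remote_path).parent))
    local_path.touch()

print("Scanned remote directories:\n - " + "\n - ".join(sorted(unique_directories)))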
@@ -114,6 +114,7 @@ class ImageConverter(BaseConverter):
         batch_size: int = 50,
         log_progress=True,
         entities: List[Item] = None,
+        progress_cb=None,
     ) -> None:
         """Upload converted data to Supervisely"""
         dataset_info = api.dataset.get_info_by_id(dataset_id, raise_error=True)
@@ -122,10 +123,14 @@ class ImageConverter(BaseConverter):
         meta, renamed_classes, renamed_tags = self.merge_metas_with_conflicts(api, dataset_id)

         existing_names = set([img.name for img in api.image.get_list(dataset_id)])
-        if log_progress:
-            progress, progress_cb = self.get_progress(self.items_count, "Uploading images...")
-        else:
-            progress_cb = None
+        progress = None
+        if progress_cb is not None:
+            log_progress = True
+        elif log_progress:
+            progress, progress_cb = self.get_progress(self.items_count, "Uploading")
+
+        if self.upload_as_links:
+            batch_size = 1000

         for batch in batched(entities or self._items, batch_size=batch_size):
             item_names = []
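
Two behavioural changes sit in this hunk: an externally supplied progress_cb now implies logging, and link-based imports use much larger batches, since registering URLs is cheap compared to uploading binaries. The batched helper imported at the top of base_converter.py drives the loop; a minimal stand-in (the real implementation may differ) behaves like this:

from typing import Iterator, List, Sequence, TypeVar

T = TypeVar("T")

def batched(seq: Sequence[T], batch_size: int = 50) -> Iterator[List[T]]:
    # Yield consecutive chunks of at most batch_size items.
    for i in range(0, len(seq), batch_size):
        yield list(seq[i : i + batch_size])

items = list(range(2500))
print([len(b) for b in batched(items, batch_size=1000)])  # [1000, 1000, 500]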
@@ -137,8 +142,8 @@ class ImageConverter(BaseConverter):
             if item.path is None:
                 continue  # image has failed validation
             item.name = f"{get_file_name(item.path)}{get_file_ext(item.path).lower()}"
-            if self.upload_as_links:
-                ann = None  # TODO: implement
+            if self.upload_as_links and not self.supports_links:
+                ann = None
             else:
                 ann = self.to_supervisely(item, meta, renamed_classes, renamed_tags)
             name = generate_free_name(
@@ -160,27 +165,40 @@ class ImageConverter(BaseConverter):
             with ApiContext(
                 api=api, project_id=project_id, dataset_id=dataset_id, project_meta=meta
             ):
-                upload_method = (
-                    api.image.upload_links if self.upload_as_links else api.image.upload_paths
-                )
-                img_infos = upload_method(
-                    dataset_id,
-                    item_names,
-                    item_paths,
-                    metas=item_metas,
-                    conflict_resolution="rename",
-                )
+                if self.upload_as_links:
+                    img_infos = api.image.upload_links(
+                        dataset_id,
+                        item_names,
+                        item_paths,
+                        metas=item_metas,
+                        batch_size=batch_size,
+                        conflict_resolution="rename",
+                        force_metadata_for_links=False,
+                    )
+                else:
+                    img_infos = api.image.upload_paths(
+                        dataset_id,
+                        item_names,
+                        item_paths,
+                        metas=item_metas,
+                        conflict_resolution="rename",
+                    )
+
                 img_ids = [img_info.id for img_info in img_infos]
                 if len(anns) == len(img_ids):
-                    api.annotation.upload_anns(img_ids, anns)
+                    api.annotation.upload_anns(
+                        img_ids, anns, skip_bounds_validation=self.upload_as_links
+                    )

             if log_progress:
                 progress_cb(len(batch))

         if log_progress:
-            if is_development():
+            if is_development() and progress is not None:
                 progress.close()
-        logger.info(f"Dataset ID:'{dataset_id}' has been successfully uploaded.")
+        logger.info(
+            f"Dataset has been successfully uploaded → {dataset_info.name}, ID:{dataset_id}"
+        )

     def validate_image(self, path: str) -> Tuple[str, str]:
         if self.upload_as_links:
@@ -188,6 +206,9 @@ class ImageConverter(BaseConverter):
         return image_helper.validate_image(path)

     def is_image(self, path: str) -> bool:
+        if self._upload_as_links and self.supports_links:
+            ext = get_file_ext(path)
+            return ext.lower() in self.allowed_exts
         mimetypes.add_type("image/heic", ".heic")  # to extend types_map
         mimetypes.add_type("image/heif", ".heif")  # to extend types_map
         mimetypes.add_type("image/jpeg", ".jfif")  # to extend types_map
@@ -20,6 +20,10 @@ from supervisely.convert.image.image_helper import validate_image_bounds

 class FastSlyImageConverter(SLYImageConverter, ImageConverter):

+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._supports_links = False
+
     def validate_format(self) -> bool:

         detected_ann_cnt = 0
@@ -2,21 +2,21 @@ import os
 from typing import Dict, Optional

 import supervisely.convert.image.sly.sly_image_helper as sly_image_helper
-from supervisely.convert.image.image_helper import validate_image_bounds
 from supervisely import (
     Annotation,
     Dataset,
+    Label,
     OpenMode,
     Project,
     ProjectMeta,
     Rectangle,
-    Label,
     logger,
 )
-from supervisely._utils import generate_free_name
+from supervisely._utils import generate_free_name, is_development
 from supervisely.api.api import Api
 from supervisely.convert.base_converter import AvailableImageConverters
 from supervisely.convert.image.image_converter import ImageConverter
+from supervisely.convert.image.image_helper import validate_image_bounds
 from supervisely.io.fs import dirs_filter, file_exists, get_file_ext
 from supervisely.io.json import load_json_file
 from supervisely.project.project import find_project_dirs
@@ -31,6 +31,7 @@ class SLYImageConverter(ImageConverter):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._project_structure = None
+        self._supports_links = True

     def __str__(self):
         return AvailableImageConverters.SLY
@@ -74,6 +75,8 @@ class SLYImageConverter(ImageConverter):
         return False

     def validate_format(self) -> bool:
+        if self.upload_as_links and self._supports_links:
+            self._download_remote_ann_files()
         if self.read_sly_project(self._input_data):
             return True

@@ -136,6 +139,8 @@ class SLYImageConverter(ImageConverter):
             meta = self._meta

         if item.ann_data is None:
+            if self._upload_as_links:
+                item.set_shape([None, None])
             return item.create_empty_annotation()

         try:
@@ -151,7 +156,7 @@ class SLYImageConverter(ImageConverter):
             )
             return Annotation.from_json(ann_json, meta).clone(labels=labels)
         except Exception as e:
-            logger.warn(f"Failed to convert annotation: {repr(e)}")
+            logger.warning(f"Failed to convert annotation: {repr(e)}")
             return item.create_empty_annotation()

     def read_sly_project(self, input_data: str) -> bool:
@@ -163,7 +168,9 @@ class SLYImageConverter(ImageConverter):
         logger.debug("Trying to find Supervisely project format in the input data")
         project_dirs = [d for d in find_project_dirs(input_data)]
         if len(project_dirs) > 1:
-            logger.info("Found multiple Supervisely projects")
+            logger.info("Found multiple possible Supervisely projects in the input data")
+        else:
+            logger.info("Possible Supervisely project found in the input data")
         meta = None
         for project_dir in project_dirs:
             project_fs = Project(project_dir, mode=OpenMode.READ)
@@ -276,6 +283,12 @@ class SLYImageConverter(ImageConverter):
         existing_datasets = api.dataset.get_list(project_id, recursive=True)
         existing_datasets = {ds.name for ds in existing_datasets}

+        if log_progress:
+            progress, progress_cb = self.get_progress(self.items_count, "Uploading project")
+        else:
+            progress, progress_cb = None, None
+
+        logger.info("Uploading project structure")
         def _upload_project(
             project_structure: Dict,
             project_id: int,
@@ -283,7 +296,6 @@ class SLYImageConverter(ImageConverter):
             parent_id: Optional[int] = None,
             first_dataset=False,
         ):
-
             for ds_name, value in project_structure.items():
                 ds_name = generate_free_name(existing_datasets, ds_name, extend_used_names=True)
                 if first_dataset:
@@ -293,13 +305,17 @@ class SLYImageConverter(ImageConverter):
                 dataset_id = api.dataset.create(project_id, ds_name, parent_id=parent_id).id

                 items = value.get(DATASET_ITEMS, [])
+                nested_datasets = value.get(NESTED_DATASETS, {})
+                logger.info(f"Dataset: {ds_name}, items: {len(items)}, nested datasets: {len(nested_datasets)}")
                 if items:
                     super(SLYImageConverter, self).upload_dataset(
-                        api, dataset_id, batch_size, log_progress, entities=items
+                        api, dataset_id, batch_size, entities=items, progress_cb=progress_cb
                     )

-                nested_datasets = value.get(NESTED_DATASETS, {})
                 if nested_datasets:
                     _upload_project(nested_datasets, project_id, dataset_id, dataset_id)

         _upload_project(self._project_structure, project_id, dataset_id, first_dataset=True)
+
+        if is_development() and progress is not None:
+            progress.close()
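
Datasets in a Supervisely project can nest arbitrarily, so _upload_project recurses: each level uploads its own items, then descends into NESTED_DATASETS carrying the freshly created dataset id as the parent. A stdlib-only sketch of the traversal, with the API call stubbed out and simplified stand-in keys:

DATASET_ITEMS = "items"      # stand-ins for the converter's internal keys
NESTED_DATASETS = "datasets"

project_structure = {
    "ds_root": {
        DATASET_ITEMS: ["a.jpg", "b.jpg"],
        NESTED_DATASETS: {
            "ds_child": {DATASET_ITEMS: ["c.jpg"], NESTED_DATASETS: {}},
        },
    },
}

def upload_project(structure, parent_id=None, depth=0):
    for ds_name, value in structure.items():
        dataset_id = (parent_id or 0) + 1  # stand-in for api.dataset.create(...).id
        items = value.get(DATASET_ITEMS, [])
        nested = value.get(NESTED_DATASETS, {})
        print("  " * depth + f"Dataset: {ds_name}, items: {len(items)}, nested datasets: {len(nested)}")
        if nested:
            upload_project(nested, parent_id=dataset_id, depth=depth + 1)

upload_project(project_structure)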
@@ -12,6 +12,10 @@ from supervisely.video.video import validate_ext as validate_video_ext

 class SLYVideoConverter(VideoConverter):

+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._supports_links = True
+
     def __str__(self) -> str:
         return AvailableVideoConverters.SLY

@@ -45,6 +49,8 @@ class SLYVideoConverter(VideoConverter):
         return False

     def validate_format(self) -> bool:
+        if self.upload_as_links and self._supports_links:
+            self._download_remote_ann_files()
         detected_ann_cnt = 0
         videos_list, ann_dict = [], {}
         for root, _, files in os.walk(self._input_data):
@@ -103,6 +109,8 @@ class SLYVideoConverter(VideoConverter):
             meta = self._meta

         if item.ann_data is None:
+            if self._upload_as_links:
+                return None
             return item.create_empty_annotation()

         try:
@@ -113,5 +121,5 @@ class SLYVideoConverter(VideoConverter):
             ann_json = sly_video_helper.rename_in_json(ann_json, renamed_classes, renamed_tags)
             return VideoAnnotation.from_json(ann_json, meta)
         except Exception as e:
-            logger.warn(f"Failed to convert annotation: {repr(e)}")
+            logger.warning(f"Failed to convert annotation: {repr(e)}")
             return item.create_empty_annotation()
@@ -1,6 +1,6 @@
 import os
 import subprocess
-from typing import Dict, Optional, Union
+from typing import Dict, Optional, Tuple, Union

 import cv2
 import magic
@@ -57,10 +57,22 @@ class VideoConverter(BaseConverter):
             self._frame_count = frame_count
             self._custom_data = custom_data if custom_data is not None else {}

+        @property
+        def shape(self) -> Tuple[int, int]:
+            return self._shape
+
+        @shape.setter
+        def shape(self, shape: Optional[Tuple[int, int]] = None):
+            self._shape = shape if shape is not None else [None, None]
+
         @property
         def frame_count(self) -> int:
             return self._frame_count

+        @frame_count.setter
+        def frame_count(self, frame_count: int):
+            self._frame_count = frame_count
+
         @property
         def name(self) -> str:
             if self._name is not None:
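
The new setters let callers backfill metadata that is unknown at construction time: for remote videos nothing is probed locally, so shape and frame count arrive later (e.g., from server-side video info). A minimal sketch of the same pattern:

from typing import Optional, Tuple

class Item:
    def __init__(self) -> None:
        self._shape = [None, None]  # (height, width), unknown until probed
        self._frame_count: Optional[int] = None

    @property
    def shape(self) -> Tuple[Optional[int], Optional[int]]:
        return tuple(self._shape)

    @shape.setter
    def shape(self, shape: Optional[Tuple[int, int]] = None) -> None:
        # None resets to "unknown", mirroring the converter's behaviour.
        self._shape = list(shape) if shape is not None else [None, None]

    @property
    def frame_count(self) -> Optional[int]:
        return self._frame_count

    @frame_count.setter
    def frame_count(self, frame_count: int) -> None:
        self._frame_count = frame_count

item = Item()
item.shape = (1080, 1920)  # filled in later, e.g. from server metadata
item.frame_count = 300
print(item.shape, item.frame_count)  # (1080, 1920) 300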
@@ -75,11 +87,11 @@ class VideoConverter(BaseConverter):
             return VideoAnnotation(self._shape, self._frame_count)

     def __init__(
-            self,
-            input_data: str,
-            labeling_interface: Optional[Union[LabelingInterface, str]],
-            upload_as_links: bool,
-            remote_files_map: Optional[Dict[str, str]] = None,
+        self,
+        input_data: str,
+        labeling_interface: Optional[Union[LabelingInterface, str]],
+        upload_as_links: bool,
+        remote_files_map: Optional[Dict[str, str]] = None,
     ):
         super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)
         self._key_id_map: KeyIdMap = None
@@ -114,7 +126,9 @@ class VideoConverter(BaseConverter):
         existing_names = set([vid.name for vid in api.video.get_list(dataset_id)])

         # check video codecs, mimetypes and convert if needed
-        convert_progress, convert_progress_cb = self.get_progress(self.items_count, "Preparing videos...")
+        convert_progress, convert_progress_cb = self.get_progress(
+            self.items_count, "Preparing videos..."
+        )
         for item in self._items:
             item_name, item_path = self.convert_to_mp4_if_needed(item.path)
             item.name = item_name
@@ -124,15 +138,19 @@ class VideoConverter(BaseConverter):
        convert_progress.close()

        has_large_files = False
+       size_progress_cb = None
        progress_cb, progress, ann_progress, ann_progress_cb = None, None, None, None
-       if log_progress and not self.upload_as_links:
+       if log_progress:
            progress, progress_cb = self.get_progress(self.items_count, "Uploading videos...")
-       file_sizes = [get_file_size(item.path) for item in self._items]
-       has_large_files = any([self._check_video_file_size(file_size) for file_size in file_sizes])
-       if has_large_files:
-           upload_progress = []
-           size_progress_cb = self._get_video_upload_progress(upload_progress)
-       batch_size = 1 if has_large_files or not self.upload_as_links else batch_size
+       if not self.upload_as_links:
+           file_sizes = [get_file_size(item.path) for item in self._items]
+           has_large_files = any(
+               [self._check_video_file_size(file_size) for file_size in file_sizes]
+           )
+           if has_large_files:
+               upload_progress = []
+               size_progress_cb = self._get_video_upload_progress(upload_progress)
+       batch_size = 1 if has_large_files and not self.upload_as_links else batch_size

        for batch in batched(self._items, batch_size=batch_size):
            item_names = []
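
The sizing logic only makes sense for local files, which is why it now sits behind the `if not self.upload_as_links` guard: remote links have nothing to stat, and the old `or not self.upload_as_links` condition pinned batch_size to 1 for every local upload regardless of size. A runnable sketch of the corrected planning step, using the 20 MB threshold from _check_video_file_size:

import os
import tempfile

LARGE = 20 * 1024 * 1024  # 20 MB, as in _check_video_file_size

def plan_batches(paths, batch_size, upload_as_links):
    if upload_as_links:
        return batch_size, False  # nothing to stat for remote links
    file_sizes = [os.path.getsize(p) for p in paths]
    has_large_files = any(size > LARGE for size in file_sizes)
    # Large files go one per batch so per-item progress can be reported.
    return (1 if has_large_files else batch_size), has_large_files

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"\0" * 1024)  # 1 KB stand-in for a small video
print(plan_batches([f.name], batch_size=10, upload_as_links=False))  # (10, False)
os.unlink(f.name)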
@@ -146,17 +164,21 @@ class VideoConverter(BaseConverter):
                item_paths.append(item.path)
                item_names.append(item.name)

-               if not self.upload_as_links:
-                   # TODO: implement generating annotations for remote videos
+               ann = None
+               if not self.upload_as_links or self.supports_links:
                    ann = self.to_supervisely(item, meta, renamed_classes, renamed_tags)
-                   figures_cnt += len(ann.figures)
-                   anns.append(ann)
+               if ann is not None:
+                   figures_cnt += len(ann.figures)
+                   anns.append(ann)

            if self.upload_as_links:
                vid_infos = api.video.upload_links(
                    dataset_id,
                    item_paths,
                    item_names,
+                   skip_download=True,
+                   progress_cb=progress_cb if log_progress else None,
+                   force_metadata_for_links=False,
                )
            else:
                vid_infos = api.video.upload_paths(
@@ -164,22 +186,24 @@ class VideoConverter(BaseConverter):
                    item_names,
                    item_paths,
                    progress_cb=progress_cb if log_progress else None,
-                   item_progress=size_progress_cb if log_progress and has_large_files else None,  # pylint: disable=used-before-assignment
+                   item_progress=(size_progress_cb if log_progress and has_large_files else None),
                )
-               vid_ids = [vid_info.id for vid_info in vid_infos]
+           vid_ids = [vid_info.id for vid_info in vid_infos]

-               if log_progress and has_large_files and figures_cnt > 0:
-                   ann_progress, ann_progress_cb = self.get_progress(figures_cnt, "Uploading annotations...")
+           if log_progress and has_large_files and figures_cnt > 0:
+               ann_progress, ann_progress_cb = self.get_progress(
+                   figures_cnt, "Uploading annotations..."
+               )

-               for video_id, ann in zip(vid_ids, anns):
-                   if ann is None:
-                       ann = VideoAnnotation(item.shape, item.frame_count)
-                   api.video.annotation.append(video_id, ann, progress_cb=ann_progress_cb)
+           for vid, ann, item, info in zip(vid_ids, anns, batch, vid_infos):
+               if ann is None:
+                   ann = VideoAnnotation((info.frame_height, info.frame_width), info.frames_count)
+               api.video.annotation.append(vid, ann, progress_cb=ann_progress_cb)

        if log_progress and is_development():
-           if progress is not None:  # pylint: disable=possibly-used-before-assignment
+           if progress is not None:
               progress.close()
-           if not self.upload_as_links and ann_progress is not None:
+           if ann_progress is not None:
               ann_progress.close()
        logger.info(f"Dataset ID:{dataset_id} has been successfully uploaded.")

@@ -268,7 +292,7 @@ class VideoConverter(BaseConverter):
         )

     def _check_video_file_size(self, file_size):
-        return file_size > 20 * 1024 * 1024 # 20 MB
+        return file_size > 20 * 1024 * 1024  # 20 MB

     def _get_video_upload_progress(self, upload_progress):
         upload_progress = []