supervisely 6.73.376__py3-none-any.whl → 6.73.378__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/__init__.py +1 -0
- supervisely/_utils.py +11 -0
- supervisely/api/entity_annotation/tag_api.py +223 -20
- supervisely/api/image_api.py +81 -1
- supervisely/app/fastapi/index.html +20 -0
- supervisely/app/widgets/__init__.py +1 -0
- supervisely/app/widgets/flexbox/flexbox.py +4 -2
- supervisely/app/widgets/flexbox/template.html +5 -9
- supervisely/app/widgets/input_number/input_number.py +2 -0
- supervisely/app/widgets/input_number/template.html +3 -0
- supervisely/app/widgets/sampling/__init__.py +0 -0
- supervisely/app/widgets/sampling/sampling.py +550 -0
- supervisely/app/widgets/select_dataset/select_dataset.py +15 -5
- supervisely/app/widgets/sly_tqdm/sly_tqdm.py +9 -0
- supervisely/convert/image/sly/sly_image_converter.py +10 -7
- supervisely/video/sampling.py +550 -0
- {supervisely-6.73.376.dist-info → supervisely-6.73.378.dist-info}/METADATA +1 -1
- {supervisely-6.73.376.dist-info → supervisely-6.73.378.dist-info}/RECORD +22 -19
- {supervisely-6.73.376.dist-info → supervisely-6.73.378.dist-info}/LICENSE +0 -0
- {supervisely-6.73.376.dist-info → supervisely-6.73.378.dist-info}/WHEEL +0 -0
- {supervisely-6.73.376.dist-info → supervisely-6.73.378.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.376.dist-info → supervisely-6.73.378.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,550 @@
|
|
|
1
|
+
import os
from typing import Dict, List, Union

import cv2
import numpy as np

from supervisely._utils import batched_iter
from supervisely.annotation.annotation import Annotation
from supervisely.annotation.label import Label
from supervisely.annotation.tag import Tag
from supervisely.annotation.tag_collection import TagCollection
from supervisely.annotation.tag_meta import TagApplicableTo, TagMeta, TagValueType
from supervisely.api.api import Api
from supervisely.api.dataset_api import DatasetInfo
from supervisely.api.image_api import ImageInfo
from supervisely.api.project_api import ProjectInfo
from supervisely.api.video.video_api import VideoInfo
from supervisely.project.project_meta import ProjectMeta
from supervisely.task.progress import tqdm_sly
from supervisely.video.video import VideoFrameReader
from supervisely.video_annotation.frame import Frame
from supervisely.video_annotation.key_id_map import KeyIdMap
from supervisely.video_annotation.video_annotation import VideoAnnotation
|
|
23
|
+
|
|
24
|
+
# Tag attached to every sampled label so an image label can be traced back to
# the video object it came from (value is taken from the video object).
VIDEO_OBJECT_TAG_META = TagMeta(
    "object_id",
    value_type=TagValueType.ANY_NUMBER,
    applicable_to=TagApplicableTo.OBJECTS_ONLY,
)
# Marker tag added to labels whose source figure has a track_id set,
# i.e. figures produced by automatic tracking.
AUTO_TRACKED_TAG_META = TagMeta(
    "auto-tracked", TagValueType.NONE, applicable_to=TagApplicableTo.OBJECTS_ONLY
)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class ApiContext:
    """In-memory cache of API lookup results shared across sampling calls.

    Known caches are initialized in ``__init__``; accessing any other
    attribute lazily creates an empty dict, so the context doubles as an
    open-ended per-run cache.
    """

    def __init__(self):
        # All caches are keyed by entity id.
        self.project_info: Dict[int, ProjectInfo] = {}
        self.project_meta: Dict[int, ProjectMeta] = {}
        self.dataset_info: Dict[int, DatasetInfo] = {}
        self.video_info: Dict[int, VideoInfo] = {}
        self.children_datasets: Dict[int, List[DatasetInfo]] = {}
        self.children_items: Dict[int, List[Union[ImageInfo, VideoInfo]]] = {}

    def __getattr__(self, item: str) -> Dict:
        # __getattr__ is only invoked after normal attribute lookup fails,
        # so the attribute is guaranteed to be missing here.  The original
        # implementation called hasattr(self, item), which re-entered
        # __getattr__ and recursed until RecursionError; create the cache
        # dict directly instead.
        new_dict: Dict = {}
        setattr(self, item, new_dict)
        return new_dict
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class SamplingSettings:
    """Keys recognized in the ``settings`` dict passed to the sampling functions."""

    # Sample only frames that contain at least one figure.
    ONLY_ANNOTATED = "annotated"
    # First frame index to sample (inclusive, defaults to 0).
    START = "start"
    # Stop frame index (exclusive); None means the whole video.
    END = "end"
    # Keep every N-th of the selected frames.
    STEP = "step"
    # Optional output size for frames; presumably (height, width) — confirm with producer.
    RESIZE = "resize"
    # Copy video annotations onto the sampled images.
    COPY_ANNOTATIONS = "copy_annotations"
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def _get_frame_indices(frames_count, start, end, step, only_annotated, video_annotation):
|
|
60
|
+
frame_indices = list(range(start, end if end is not None else frames_count))
|
|
61
|
+
if only_annotated:
|
|
62
|
+
annotated_frames = set()
|
|
63
|
+
for frame in video_annotation.frames:
|
|
64
|
+
frame: Frame
|
|
65
|
+
if frame.figures:
|
|
66
|
+
annotated_frames.add(frame.index)
|
|
67
|
+
frame_indices = [idx for idx in frame_indices if idx in annotated_frames]
|
|
68
|
+
frame_indices = [frame_indices[i] for i in range(0, len(frame_indices), step)]
|
|
69
|
+
return frame_indices
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _frame_to_annotation(frame: Frame, video_annotation: VideoAnnotation) -> Annotation:
    """Convert one video frame into an image :class:`Annotation`.

    Object-level and video-level tags whose frame range covers this frame are
    copied over.  Every label additionally gets an ``object_id`` tag, and
    figures with a ``track_id`` get an ``auto-tracked`` marker tag.
    """

    def _covers_frame(tag) -> bool:
        rng = tag.frame_range
        return rng is None or rng[0] <= frame.index <= rng[1]

    labels = []
    for figure in frame.figures:
        video_object = figure.parent_object
        label_tags = [
            Tag(tag.meta, tag.value, labeler_login=tag.labeler_login)
            for tag in video_object.tags
            if _covers_frame(tag)
        ]
        # NOTE(review): the "object_id" tag is filled with class_id here —
        # looks like it may be meant to carry the object id; confirm upstream.
        label_tags.append(Tag(VIDEO_OBJECT_TAG_META, video_object.class_id))
        if figure.track_id is not None:
            label_tags.append(
                Tag(AUTO_TRACKED_TAG_META, None, labeler_login=video_object.labeler_login)
            )
        labels.append(
            Label(figure.geometry, video_object.obj_class, TagCollection(label_tags))
        )

    image_tags = [
        Tag(tag.meta, tag.value, labeler_login=tag.labeler_login)
        for tag in video_annotation.tags
        if _covers_frame(tag)
    ]
    return Annotation(
        video_annotation.img_size, labels=labels, img_tags=TagCollection(image_tags)
    )
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def _upload_annotations(api: Api, image_ids, frame_indices, video_annotation: VideoAnnotation):
    """Upload per-frame annotations for the sampled images.

    Only frames present in ``video_annotation.frames`` produce an annotation;
    images whose frame has no entry are skipped.

    Fix: the original passed the full ``image_ids`` list together with a
    possibly shorter ``anns`` list (frames missing from the annotation were
    skipped), which misaligned annotations with images.  Only ids that have a
    matching annotation are uploaded now.
    """
    annotated_ids = []
    anns = []
    for image_id, frame_index in zip(image_ids, frame_indices):
        frame = video_annotation.frames.get(frame_index, None)
        if frame is not None:
            annotated_ids.append(image_id)
            anns.append(_frame_to_annotation(frame, video_annotation))
    if annotated_ids:
        api.annotation.upload_anns(annotated_ids, anns=anns)
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def _upload_frames(
    api: Api,
    frames: List[np.ndarray],
    video_name: str,
    video_frames_count: int,
    indices: List[int],
    dataset_id: int,
    sample_info: Dict = None,
    context: ApiContext = None,
    copy_annotations: bool = False,
    video_annotation: VideoAnnotation = None,
) -> List[int]:
    """Upload frame images to ``dataset_id`` and return image ids aligned with ``indices``.

    Images whose generated name already exists in the dataset are reused
    instead of re-uploaded.  When ``copy_annotations`` is set, annotations
    from ``video_annotation`` are uploaded for the frames as well.
    """
    if sample_info is None:
        sample_info = {}
    if context is not None:
        if dataset_id not in context.children_items:
            context.children_items[dataset_id] = api.image.get_list(dataset_id)
        existing_images = context.children_items[dataset_id]
    else:
        existing_images = api.image.get_list(dataset_id)

    name_to_info = {image.name: image for image in existing_images}

    # Zero-pad frame numbers so image names sort naturally (hoisted out of the loop).
    digits = len(str(video_frames_count))
    image_ids: List[int] = [None] * len(frames)
    to_upload = []
    for i, index in enumerate(indices):
        image_name = f"{video_name}_frame_{str(index).zfill(digits)}.png"
        if image_name in name_to_info:
            image_ids[i] = name_to_info[image_name].id
        else:
            to_upload.append((image_name, i))

    if to_upload:
        new_frames = [frames[i] for _, i in to_upload]
        names = [name for name, _ in to_upload]
        # Fix: record the actual video frame index in the image meta; the
        # original stored the batch-local position ``i`` instead.
        metas = [{**sample_info, "frame_index": indices[i]} for _, i in to_upload]
        uploaded = api.image.upload_nps(
            dataset_id=dataset_id,
            names=names,
            imgs=new_frames,
            metas=metas,
        )

        for image_info, (_, i) in zip(uploaded, to_upload):
            image_ids[i] = image_info.id

    if copy_annotations:
        _upload_annotations(api, image_ids, indices, video_annotation)

    return image_ids
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
def sample_video(
    api: Api,
    video_id: int,
    dst_dataset_info: DatasetInfo,
    settings: Dict,
    sample_info: Dict = None,
    context: ApiContext = None,
    progress: tqdm_sly = None,
):
    """Sample frames of one video into an image dataset.

    Downloads the video, selects frame indices per ``settings``
    (see :class:`SamplingSettings`), uploads the frames as images into a
    destination dataset (created under ``dst_dataset_info`` if needed) and
    optionally copies annotations.

    Fixes vs. original:
    - ``cv2.resize`` returns a new array; the original discarded the result
      (resize was a no-op) and also passed a 3-element dsize, which OpenCV
      rejects.
    - The downloaded temp file is now removed when processing finishes.
    """
    dst_parent_info = dst_dataset_info
    only_annotated = settings.get(SamplingSettings.ONLY_ANNOTATED, False)
    start_frame = settings.get(SamplingSettings.START, 0)
    end_frame = settings.get(SamplingSettings.END, None)
    step = settings.get(SamplingSettings.STEP, 1)
    resize = settings.get(SamplingSettings.RESIZE, None)
    copy_annotations = settings.get(SamplingSettings.COPY_ANNOTATIONS, False)

    if context is None:
        context = ApiContext()
    if video_id not in context.video_info:
        context.video_info[video_id] = api.video.get_info_by_id(video_id)
    video_info = context.video_info[video_id]

    project_id = video_info.project_id
    if project_id not in context.project_meta:
        context.project_meta[project_id] = ProjectMeta.from_json(api.project.get_meta(project_id))
    project_meta = context.project_meta[project_id]

    video_annotation = VideoAnnotation.from_json(
        api.video.annotation.download(video_info.id), project_meta, key_id_map=KeyIdMap()
    )

    # Configure the (reused) progress bar for the byte-sized download phase.
    progress_cb = None
    if progress is not None:
        size = int(video_info.file_meta["size"])
        progress.reset(size)
        progress.unit = "B"
        progress.unit_scale = True
        progress.unit_divisor = 1024
        progress.message = f"Downloading {video_info.name} [{video_info.id}]"
        progress.desc = progress.message
        progress.refresh()
        progress_cb = progress.update

    video_path = f"/tmp/{video_info.name}"
    api.video.download_path(video_info.id, video_path, progress_cb=progress_cb)

    try:
        frame_indices = _get_frame_indices(
            video_info.frames_count, start_frame, end_frame, step, only_annotated, video_annotation
        )

        dst_dataset_info = _get_or_create_dst_dataset(
            api=api,
            src_info=video_info,
            dst_parent_info=dst_parent_info,
            sample_info=sample_info,
            context=context,
        )

        # Reconfigure the progress bar for the per-frame processing phase.
        if progress is not None:
            progress.reset(len(frame_indices))
            progress.unit = "it"
            progress.unit_scale = False
            progress.unit_divisor = 1000
            progress.message = f"Processing {video_info.name} [{video_info.id}]"
            progress.desc = progress.message
            progress.miniters = 1
            progress.refresh()

        with VideoFrameReader(video_path, frame_indices) as reader:
            for batch in batched_iter(zip(reader, frame_indices), 10):
                frames, indices = zip(*batch)
                if resize:
                    # cv2.resize takes dsize as (width, height); assumes the
                    # setting stores (height, width) — TODO confirm with producer.
                    frames = [
                        cv2.resize(
                            frame, (resize[1], resize[0]), interpolation=cv2.INTER_LINEAR
                        )
                        for frame in frames
                    ]

                image_ids = _upload_frames(
                    api=api,
                    frames=frames,
                    video_name=video_info.name,
                    video_frames_count=video_info.frames_count,
                    indices=indices,
                    dataset_id=dst_dataset_info.id,
                    sample_info=sample_info,
                    context=context,
                    copy_annotations=copy_annotations,
                    video_annotation=video_annotation,
                )

                if progress is not None:
                    progress.update(len(image_ids))
    finally:
        # Fix: remove the downloaded video; the original leaked it in /tmp.
        if os.path.exists(video_path):
            os.remove(video_path)
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def _get_or_create_dst_dataset(
    api: Api,
    src_info: Union[DatasetInfo, VideoInfo],
    dst_parent_info: Union[ProjectInfo, DatasetInfo],
    sample_info: Dict = None,
    context: ApiContext = None,
) -> DatasetInfo:
    """Find or create the destination dataset mirroring ``src_info``.

    An existing child of ``dst_parent_info`` with the same name is reused;
    otherwise a new dataset is created and its custom data is filled with
    sample provenance info.

    Fixes vs. original:
    - The caller's ``sample_info`` dict is no longer mutated (a copy is
      updated instead).
    - The DatasetInfo description interpolated ``parent_id`` where the
      project id is meant (the VideoInfo branch uses ``project_id``).
    """
    if isinstance(dst_parent_info, ProjectInfo):
        parent = None
        project_id = dst_parent_info.id
    else:
        parent = dst_parent_info.id
        project_id = dst_parent_info.project_id

    if context is not None:
        if dst_parent_info.id not in context.children_datasets:
            context.children_datasets[dst_parent_info.id] = api.dataset.get_list(
                project_id, parent_id=parent
            )
        dst_datasets = context.children_datasets[dst_parent_info.id]
    else:
        dst_datasets = api.dataset.get_list(project_id, parent_id=parent)

    # Reuse an existing dataset with the same name.
    for dst_dataset in dst_datasets:
        if dst_dataset.name == src_info.name:
            return dst_dataset

    if isinstance(src_info, DatasetInfo):
        src_dataset_info = src_info
        description = (
            f"Sample dataset made from project #{src_info.project_id}, dataset #{src_info.id}"
        )
    else:
        if context is not None:
            if src_info.dataset_id not in context.dataset_info:
                context.dataset_info[src_info.dataset_id] = api.dataset.get_info_by_id(
                    src_info.dataset_id
                )
            src_dataset_info = context.dataset_info[src_info.dataset_id]
        else:
            src_dataset_info = api.dataset.get_info_by_id(src_info.dataset_id)
        description = f"Sample dataset made from project #{src_info.project_id}, dataset #{src_info.dataset_id}, video #{src_info.id}"

    dst_dataset = api.dataset.create(
        project_id,
        name=src_info.name,
        description=description,
        parent_id=parent,
    )
    if sample_info is None:
        if context is not None:
            if src_info.project_id not in context.project_info:
                context.project_info[src_info.project_id] = api.project.get_info_by_id(
                    src_info.project_id
                )
            src_project_info = context.project_info[src_info.project_id]
        else:
            src_project_info = api.project.get_info_by_id(src_info.project_id)
        sample_info = {
            "is_sample": True,
            "video_project_id": src_info.project_id,
            "video_project_name": src_project_info.name,
        }
    else:
        # Fix: copy before updating so the caller's dict is not mutated.
        sample_info = dict(sample_info)
    sample_info.update(
        {
            "video_dataset_id": src_dataset_info.id,
            "video_dataset_name": src_dataset_info.name,
        }
    )
    if isinstance(src_info, VideoInfo):
        sample_info.update(
            {
                "video_id": src_info.id,
                "video_name": src_info.name,
            }
        )
    api.dataset.update_custom_data(
        dst_dataset.id,
        custom_data=sample_info,
    )
    if context is not None:
        context.dataset_info[dst_dataset.id] = dst_dataset
    return dst_dataset
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
def sample_video_dataset(
    api: Api,
    src_dataset_id: int,
    dst_parent_info: Union[ProjectInfo, DatasetInfo],
    settings: Dict,
    sample_info: Dict = None,
    context: ApiContext = None,
    datasets_ids_whitelist: List[int] = None,
    items_progress_cb: tqdm_sly = None,
    video_progress: tqdm_sly = None,
):
    """Sample every video of a dataset (and, recursively, its nested datasets).

    Returns the created/reused destination dataset, or None when the dataset
    is filtered out by ``datasets_ids_whitelist`` and has no whitelisted
    descendants.

    Fix vs. original: ``sample_info`` defaults to None, but the original
    unconditionally called ``sample_info.copy()`` when passing it to
    :func:`sample_video`, crashing with AttributeError.
    """
    if context is None:
        context = ApiContext()

    if not (
        datasets_ids_whitelist is None
        or src_dataset_id in datasets_ids_whitelist  # this dataset should be sampled
        or _has_children_datasets(
            api, src_dataset_id, context=context, children_ids=datasets_ids_whitelist
        )  # has children datasets that should be sampled
    ):
        return None

    src_dataset_info = context.dataset_info.get(src_dataset_id, None)
    if src_dataset_info is None:
        src_dataset_info = api.dataset.get_info_by_id(src_dataset_id)
        context.dataset_info[src_dataset_id] = src_dataset_info

    dst_dataset = _get_or_create_dst_dataset(
        api=api,
        src_info=src_dataset_info,
        dst_parent_info=dst_parent_info,
        sample_info=sample_info,
        context=context,
    )

    if datasets_ids_whitelist is None or src_dataset_id in datasets_ids_whitelist:
        video_infos = api.video.get_list(src_dataset_id)
        for video_info in video_infos:
            sample_video(
                api=api,
                video_id=video_info.id,
                dst_dataset_info=dst_dataset,
                settings=settings,
                # Fix: guard against the None default before copying.
                sample_info=None if sample_info is None else sample_info.copy(),
                context=context,
                progress=video_progress,
            )
            if items_progress_cb is not None:
                items_progress_cb()

    # Recurse into nested datasets (list is cached in the context).
    if src_dataset_id not in context.children_datasets:
        if src_dataset_id not in context.dataset_info:
            context.dataset_info[src_dataset_id] = api.dataset.get_info_by_id(src_dataset_id)
        src_dataset_info = context.dataset_info[src_dataset_id]
        context.children_datasets[src_dataset_id] = api.dataset.get_list(
            src_dataset_info.project_id, parent_id=src_dataset_info.id
        )
    datasets = context.children_datasets[src_dataset_id]
    for dataset in datasets:
        sample_video_dataset(
            api=api,
            src_dataset_id=dataset.id,
            dst_parent_info=dst_dataset,
            settings=settings,
            datasets_ids_whitelist=datasets_ids_whitelist,
            sample_info=sample_info,
            context=context,
            items_progress_cb=items_progress_cb,
            video_progress=video_progress,
        )
    return dst_dataset
|
|
407
|
+
|
|
408
|
+
|
|
409
|
+
def _update_meta(
    api: Api, src_project_meta: ProjectMeta, dst_project_id: int, context: ApiContext = None
):
    """Merge ``src_project_meta`` plus the sampling tag metas into the
    destination project's meta, pushing it to the server if it changed.

    Fix vs. original: :class:`ProjectMeta` is immutable — ``merge()`` and
    ``add_tag_meta()`` return new objects.  The original discarded those
    return values, so the destination meta was never actually updated, and it
    compared against ``src_project_meta`` instead of the destination's
    current meta when deciding whether to push.
    """
    if context is None:
        context = ApiContext()
    if dst_project_id not in context.project_meta:
        context.project_meta[dst_project_id] = ProjectMeta.from_json(
            api.project.get_meta(dst_project_id)
        )
    dst_project_meta = context.project_meta[dst_project_id]
    merged_meta = dst_project_meta.merge(src_project_meta)
    if merged_meta.get_tag_meta(VIDEO_OBJECT_TAG_META.name) is None:
        merged_meta = merged_meta.add_tag_meta(VIDEO_OBJECT_TAG_META)
    if merged_meta.get_tag_meta(AUTO_TRACKED_TAG_META.name) is None:
        merged_meta = merged_meta.add_tag_meta(AUTO_TRACKED_TAG_META)

    # Push only when the merge actually changed the destination meta.
    if merged_meta != dst_project_meta:
        api.project.update_meta(dst_project_id, merged_meta.to_json())
        context.project_meta[dst_project_id] = merged_meta
|
|
428
|
+
|
|
429
|
+
|
|
430
|
+
def _get_or_create_dst_project(
    api: Api,
    src_project_id: int,
    dst_project_id: Union[int, None] = None,
    sample_info: Dict = None,
    context: ApiContext = None,
) -> ProjectInfo:
    """Return the destination images project, creating it when no id is given.

    After the project is resolved, the source project's meta (plus the
    sampling tag metas) is merged into it via :func:`_update_meta`.

    Fixes vs. original: the caller's ``sample_info`` dict is copied before
    being extended, and the source meta lookup also works when ``context``
    is None instead of assuming a cache is present.
    """
    if dst_project_id is None:
        # Get source project info (cached when a context is provided).
        if context is None:
            src_project_info = api.project.get_info_by_id(src_project_id)
        else:
            if src_project_id not in context.project_info:
                context.project_info[src_project_id] = api.project.get_info_by_id(src_project_id)
            src_project_info = context.project_info[src_project_id]
        # Create the new project.
        sample_info = {} if sample_info is None else dict(sample_info)
        sample_info.update(
            {
                "is_sample": True,
                "video_project_id": src_project_id,
                "video_project_name": src_project_info.name,
            }
        )
        dst_project = api.project.create(
            src_project_info.workspace_id,
            f"{src_project_info.name}(images)",
            description=f"Sample project made from project #{src_project_info.id}",
            change_name_if_conflict=True,
        )
        api.project.update_custom_data(dst_project.id, sample_info)
    else:
        # Use the existing project.
        dst_project = api.project.get_info_by_id(dst_project_id)

    if context is not None:
        context.project_info[dst_project.id] = dst_project
        if src_project_id not in context.project_meta:
            context.project_meta[src_project_id] = ProjectMeta.from_json(
                api.project.get_meta(src_project_id)
            )
        src_project_meta = context.project_meta[src_project_id]
    else:
        # Fix: do not require a context to fetch the source meta.
        src_project_meta = ProjectMeta.from_json(api.project.get_meta(src_project_id))
    _update_meta(api, src_project_meta, dst_project.id, context=context)
    return dst_project
|
|
474
|
+
|
|
475
|
+
|
|
476
|
+
def _has_children_datasets(
    api: Api, dataset_id: int, context: ApiContext = None, children_ids: List[int] = None
) -> bool:
    """Report whether ``dataset_id`` has nested datasets.

    When ``children_ids`` is given, answers whether any of those ids appears
    anywhere in the subtree below ``dataset_id``; otherwise answers whether
    any direct child exists.  Lookups are cached in ``context``.
    """
    if context is None:
        context = ApiContext()

    info_cache = context.dataset_info
    if dataset_id not in info_cache:
        info_cache[dataset_id] = api.dataset.get_info_by_id(dataset_id)
    dataset_info = info_cache[dataset_id]

    children_cache = context.children_datasets
    if dataset_id not in children_cache:
        children_cache[dataset_id] = api.dataset.get_list(
            project_id=dataset_info.project_id, parent_id=dataset_id
        )
    children = children_cache[dataset_id]

    if children_ids is None:
        return bool(children)

    # Depth-first search for any whitelisted dataset in the subtree.
    return any(
        child.id in children_ids
        or _has_children_datasets(api, child.id, context=context, children_ids=children_ids)
        for child in children
    )
|
|
500
|
+
|
|
501
|
+
|
|
502
|
+
def sample_video_project(
    api: Api,
    project_id: int,
    settings: Dict,
    dst_project_id: Union[int, None] = None,
    datasets_ids: List[int] = None,
    context: ApiContext = None,
    items_progress_cb: tqdm_sly = None,
    video_progress: tqdm_sly = None,
):
    """Create an image-project sample of an entire video project.

    Resolves (or creates) the destination project, then samples every
    top-level dataset; nested datasets are handled recursively by
    :func:`sample_video_dataset`.  Returns the destination project info.
    """
    if context is None:
        context = ApiContext()

    if project_id not in context.project_info:
        context.project_info[project_id] = api.project.get_info_by_id(project_id)
    src_project_info = context.project_info[project_id]

    sample_info = {
        "is_sample": True,
        "video_project_id": src_project_info.id,
        "video_project_name": src_project_info.name,
    }
    dst_project_info = _get_or_create_dst_project(
        api, project_id, dst_project_id, sample_info, context
    )

    # Top-level datasets only; recursion into children happens downstream.
    if project_id not in context.children_datasets:
        context.children_datasets[project_id] = api.dataset.get_list(project_id)
    top_level_datasets = context.children_datasets[project_id]

    # Warm the dataset-info cache before sampling.
    for ds_info in top_level_datasets:
        context.dataset_info[ds_info.id] = ds_info

    for ds_info in top_level_datasets:
        sample_video_dataset(
            api=api,
            src_dataset_id=ds_info.id,
            dst_parent_info=dst_project_info,
            settings=settings,
            sample_info=sample_info,
            context=context,
            datasets_ids_whitelist=datasets_ids,
            items_progress_cb=items_progress_cb,
            video_progress=video_progress,
        )

    return dst_project_info
|