dtlpy 1.115.44__py3-none-any.whl → 1.117.6__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- dtlpy/__init__.py +491 -491
- dtlpy/__version__.py +1 -1
- dtlpy/assets/__init__.py +26 -26
- dtlpy/assets/code_server/config.yaml +2 -2
- dtlpy/assets/code_server/installation.sh +24 -24
- dtlpy/assets/code_server/launch.json +13 -13
- dtlpy/assets/code_server/settings.json +2 -2
- dtlpy/assets/main.py +53 -53
- dtlpy/assets/main_partial.py +18 -18
- dtlpy/assets/mock.json +11 -11
- dtlpy/assets/model_adapter.py +83 -83
- dtlpy/assets/package.json +61 -61
- dtlpy/assets/package_catalog.json +29 -29
- dtlpy/assets/package_gitignore +307 -307
- dtlpy/assets/service_runners/__init__.py +33 -33
- dtlpy/assets/service_runners/converter.py +96 -96
- dtlpy/assets/service_runners/multi_method.py +49 -49
- dtlpy/assets/service_runners/multi_method_annotation.py +54 -54
- dtlpy/assets/service_runners/multi_method_dataset.py +55 -55
- dtlpy/assets/service_runners/multi_method_item.py +52 -52
- dtlpy/assets/service_runners/multi_method_json.py +52 -52
- dtlpy/assets/service_runners/single_method.py +37 -37
- dtlpy/assets/service_runners/single_method_annotation.py +43 -43
- dtlpy/assets/service_runners/single_method_dataset.py +43 -43
- dtlpy/assets/service_runners/single_method_item.py +41 -41
- dtlpy/assets/service_runners/single_method_json.py +42 -42
- dtlpy/assets/service_runners/single_method_multi_input.py +45 -45
- dtlpy/assets/voc_annotation_template.xml +23 -23
- dtlpy/caches/base_cache.py +32 -32
- dtlpy/caches/cache.py +473 -473
- dtlpy/caches/dl_cache.py +201 -201
- dtlpy/caches/filesystem_cache.py +89 -89
- dtlpy/caches/redis_cache.py +84 -84
- dtlpy/dlp/__init__.py +20 -20
- dtlpy/dlp/cli_utilities.py +367 -367
- dtlpy/dlp/command_executor.py +764 -764
- dtlpy/dlp/dlp +1 -1
- dtlpy/dlp/dlp.bat +1 -1
- dtlpy/dlp/dlp.py +128 -128
- dtlpy/dlp/parser.py +651 -651
- dtlpy/entities/__init__.py +83 -83
- dtlpy/entities/analytic.py +347 -347
- dtlpy/entities/annotation.py +1879 -1879
- dtlpy/entities/annotation_collection.py +699 -699
- dtlpy/entities/annotation_definitions/__init__.py +20 -20
- dtlpy/entities/annotation_definitions/base_annotation_definition.py +100 -100
- dtlpy/entities/annotation_definitions/box.py +195 -195
- dtlpy/entities/annotation_definitions/classification.py +67 -67
- dtlpy/entities/annotation_definitions/comparison.py +72 -72
- dtlpy/entities/annotation_definitions/cube.py +204 -204
- dtlpy/entities/annotation_definitions/cube_3d.py +149 -149
- dtlpy/entities/annotation_definitions/description.py +32 -32
- dtlpy/entities/annotation_definitions/ellipse.py +124 -124
- dtlpy/entities/annotation_definitions/free_text.py +62 -62
- dtlpy/entities/annotation_definitions/gis.py +69 -69
- dtlpy/entities/annotation_definitions/note.py +139 -139
- dtlpy/entities/annotation_definitions/point.py +117 -117
- dtlpy/entities/annotation_definitions/polygon.py +182 -182
- dtlpy/entities/annotation_definitions/polyline.py +111 -111
- dtlpy/entities/annotation_definitions/pose.py +92 -92
- dtlpy/entities/annotation_definitions/ref_image.py +86 -86
- dtlpy/entities/annotation_definitions/segmentation.py +240 -240
- dtlpy/entities/annotation_definitions/subtitle.py +34 -34
- dtlpy/entities/annotation_definitions/text.py +85 -85
- dtlpy/entities/annotation_definitions/undefined_annotation.py +74 -74
- dtlpy/entities/app.py +220 -220
- dtlpy/entities/app_module.py +107 -107
- dtlpy/entities/artifact.py +174 -174
- dtlpy/entities/assignment.py +399 -399
- dtlpy/entities/base_entity.py +214 -214
- dtlpy/entities/bot.py +113 -113
- dtlpy/entities/codebase.py +292 -292
- dtlpy/entities/collection.py +38 -38
- dtlpy/entities/command.py +169 -169
- dtlpy/entities/compute.py +449 -449
- dtlpy/entities/dataset.py +1299 -1299
- dtlpy/entities/directory_tree.py +44 -44
- dtlpy/entities/dpk.py +470 -470
- dtlpy/entities/driver.py +235 -235
- dtlpy/entities/execution.py +397 -397
- dtlpy/entities/feature.py +124 -124
- dtlpy/entities/feature_set.py +152 -145
- dtlpy/entities/filters.py +798 -798
- dtlpy/entities/gis_item.py +107 -107
- dtlpy/entities/integration.py +184 -184
- dtlpy/entities/item.py +975 -959
- dtlpy/entities/label.py +123 -123
- dtlpy/entities/links.py +85 -85
- dtlpy/entities/message.py +175 -175
- dtlpy/entities/model.py +684 -684
- dtlpy/entities/node.py +1005 -1005
- dtlpy/entities/ontology.py +810 -803
- dtlpy/entities/organization.py +287 -287
- dtlpy/entities/package.py +657 -657
- dtlpy/entities/package_defaults.py +5 -5
- dtlpy/entities/package_function.py +185 -185
- dtlpy/entities/package_module.py +113 -113
- dtlpy/entities/package_slot.py +118 -118
- dtlpy/entities/paged_entities.py +299 -299
- dtlpy/entities/pipeline.py +624 -624
- dtlpy/entities/pipeline_execution.py +279 -279
- dtlpy/entities/project.py +394 -394
- dtlpy/entities/prompt_item.py +505 -505
- dtlpy/entities/recipe.py +301 -301
- dtlpy/entities/reflect_dict.py +102 -102
- dtlpy/entities/resource_execution.py +138 -138
- dtlpy/entities/service.py +974 -963
- dtlpy/entities/service_driver.py +117 -117
- dtlpy/entities/setting.py +294 -294
- dtlpy/entities/task.py +495 -495
- dtlpy/entities/time_series.py +143 -143
- dtlpy/entities/trigger.py +426 -426
- dtlpy/entities/user.py +118 -118
- dtlpy/entities/webhook.py +124 -124
- dtlpy/examples/__init__.py +19 -19
- dtlpy/examples/add_labels.py +135 -135
- dtlpy/examples/add_metadata_to_item.py +21 -21
- dtlpy/examples/annotate_items_using_model.py +65 -65
- dtlpy/examples/annotate_video_using_model_and_tracker.py +75 -75
- dtlpy/examples/annotations_convert_to_voc.py +9 -9
- dtlpy/examples/annotations_convert_to_yolo.py +9 -9
- dtlpy/examples/convert_annotation_types.py +51 -51
- dtlpy/examples/converter.py +143 -143
- dtlpy/examples/copy_annotations.py +22 -22
- dtlpy/examples/copy_folder.py +31 -31
- dtlpy/examples/create_annotations.py +51 -51
- dtlpy/examples/create_video_annotations.py +83 -83
- dtlpy/examples/delete_annotations.py +26 -26
- dtlpy/examples/filters.py +113 -113
- dtlpy/examples/move_item.py +23 -23
- dtlpy/examples/play_video_annotation.py +13 -13
- dtlpy/examples/show_item_and_mask.py +53 -53
- dtlpy/examples/triggers.py +49 -49
- dtlpy/examples/upload_batch_of_items.py +20 -20
- dtlpy/examples/upload_items_and_custom_format_annotations.py +55 -55
- dtlpy/examples/upload_items_with_modalities.py +43 -43
- dtlpy/examples/upload_segmentation_annotations_from_mask_image.py +44 -44
- dtlpy/examples/upload_yolo_format_annotations.py +70 -70
- dtlpy/exceptions.py +125 -125
- dtlpy/miscellaneous/__init__.py +20 -20
- dtlpy/miscellaneous/dict_differ.py +95 -95
- dtlpy/miscellaneous/git_utils.py +217 -217
- dtlpy/miscellaneous/json_utils.py +14 -14
- dtlpy/miscellaneous/list_print.py +105 -105
- dtlpy/miscellaneous/zipping.py +130 -130
- dtlpy/ml/__init__.py +20 -20
- dtlpy/ml/base_feature_extractor_adapter.py +27 -27
- dtlpy/ml/base_model_adapter.py +1287 -1230
- dtlpy/ml/metrics.py +461 -461
- dtlpy/ml/predictions_utils.py +274 -274
- dtlpy/ml/summary_writer.py +57 -57
- dtlpy/ml/train_utils.py +60 -60
- dtlpy/new_instance.py +252 -252
- dtlpy/repositories/__init__.py +56 -56
- dtlpy/repositories/analytics.py +85 -85
- dtlpy/repositories/annotations.py +916 -916
- dtlpy/repositories/apps.py +383 -383
- dtlpy/repositories/artifacts.py +452 -452
- dtlpy/repositories/assignments.py +599 -599
- dtlpy/repositories/bots.py +213 -213
- dtlpy/repositories/codebases.py +559 -559
- dtlpy/repositories/collections.py +332 -332
- dtlpy/repositories/commands.py +152 -152
- dtlpy/repositories/compositions.py +61 -61
- dtlpy/repositories/computes.py +439 -439
- dtlpy/repositories/datasets.py +1585 -1504
- dtlpy/repositories/downloader.py +1157 -923
- dtlpy/repositories/dpks.py +433 -433
- dtlpy/repositories/drivers.py +482 -482
- dtlpy/repositories/executions.py +815 -815
- dtlpy/repositories/feature_sets.py +256 -226
- dtlpy/repositories/features.py +255 -255
- dtlpy/repositories/integrations.py +484 -484
- dtlpy/repositories/items.py +912 -912
- dtlpy/repositories/messages.py +94 -94
- dtlpy/repositories/models.py +1000 -1000
- dtlpy/repositories/nodes.py +80 -80
- dtlpy/repositories/ontologies.py +511 -511
- dtlpy/repositories/organizations.py +525 -525
- dtlpy/repositories/packages.py +1941 -1941
- dtlpy/repositories/pipeline_executions.py +451 -451
- dtlpy/repositories/pipelines.py +640 -640
- dtlpy/repositories/projects.py +539 -539
- dtlpy/repositories/recipes.py +429 -399
- dtlpy/repositories/resource_executions.py +137 -137
- dtlpy/repositories/schema.py +120 -120
- dtlpy/repositories/service_drivers.py +213 -213
- dtlpy/repositories/services.py +1704 -1704
- dtlpy/repositories/settings.py +339 -339
- dtlpy/repositories/tasks.py +1477 -1477
- dtlpy/repositories/times_series.py +278 -278
- dtlpy/repositories/triggers.py +536 -536
- dtlpy/repositories/upload_element.py +257 -257
- dtlpy/repositories/uploader.py +661 -661
- dtlpy/repositories/webhooks.py +249 -249
- dtlpy/services/__init__.py +22 -22
- dtlpy/services/aihttp_retry.py +131 -131
- dtlpy/services/api_client.py +1786 -1785
- dtlpy/services/api_reference.py +40 -40
- dtlpy/services/async_utils.py +133 -133
- dtlpy/services/calls_counter.py +44 -44
- dtlpy/services/check_sdk.py +68 -68
- dtlpy/services/cookie.py +115 -115
- dtlpy/services/create_logger.py +156 -156
- dtlpy/services/events.py +84 -84
- dtlpy/services/logins.py +235 -235
- dtlpy/services/reporter.py +256 -256
- dtlpy/services/service_defaults.py +91 -91
- dtlpy/utilities/__init__.py +20 -20
- dtlpy/utilities/annotations/__init__.py +16 -16
- dtlpy/utilities/annotations/annotation_converters.py +269 -269
- dtlpy/utilities/base_package_runner.py +285 -264
- dtlpy/utilities/converter.py +1650 -1650
- dtlpy/utilities/dataset_generators/__init__.py +1 -1
- dtlpy/utilities/dataset_generators/dataset_generator.py +670 -670
- dtlpy/utilities/dataset_generators/dataset_generator_tensorflow.py +23 -23
- dtlpy/utilities/dataset_generators/dataset_generator_torch.py +21 -21
- dtlpy/utilities/local_development/__init__.py +1 -1
- dtlpy/utilities/local_development/local_session.py +179 -179
- dtlpy/utilities/reports/__init__.py +2 -2
- dtlpy/utilities/reports/figures.py +343 -343
- dtlpy/utilities/reports/report.py +71 -71
- dtlpy/utilities/videos/__init__.py +17 -17
- dtlpy/utilities/videos/video_player.py +598 -598
- dtlpy/utilities/videos/videos.py +470 -470
- {dtlpy-1.115.44.data → dtlpy-1.117.6.data}/scripts/dlp +1 -1
- dtlpy-1.117.6.data/scripts/dlp.bat +2 -0
- {dtlpy-1.115.44.data → dtlpy-1.117.6.data}/scripts/dlp.py +128 -128
- {dtlpy-1.115.44.dist-info → dtlpy-1.117.6.dist-info}/METADATA +186 -186
- dtlpy-1.117.6.dist-info/RECORD +239 -0
- {dtlpy-1.115.44.dist-info → dtlpy-1.117.6.dist-info}/WHEEL +1 -1
- {dtlpy-1.115.44.dist-info → dtlpy-1.117.6.dist-info}/licenses/LICENSE +200 -200
- tests/features/environment.py +551 -551
- dtlpy/assets/__pycache__/__init__.cpython-310.pyc +0 -0
- dtlpy-1.115.44.data/scripts/dlp.bat +0 -2
- dtlpy-1.115.44.dist-info/RECORD +0 -240
- {dtlpy-1.115.44.dist-info → dtlpy-1.117.6.dist-info}/entry_points.txt +0 -0
- {dtlpy-1.115.44.dist-info → dtlpy-1.117.6.dist-info}/top_level.txt +0 -0
dtlpy/utilities/videos/videos.py
CHANGED
@@ -1,470 +1,470 @@

Every line of the file was removed and re-added in this release; the removed (-) and re-added (+) content shown in this extract is line-for-line identical, so the file is reproduced once below:

import asyncio
import time
import types

import numpy as np
import os
import logging
import dtlpy as dl
import shutil

logger = logging.getLogger(name='dtlpy')


##########
# Videos #
##########
class Videos:
    def __init__(self):
        pass

    @staticmethod
    def get_info(filepath):
        try:
            import ffmpeg
        except ImportError:
            logger.error(
                'Import Error! Cant import ffmpeg. '
                'Annotations operations will be limited. import manually and fix errors')
            raise
        probe = ffmpeg.probe(filepath)
        return probe

    @staticmethod
    def get_max_object_id(item):
        max_object_id = 1
        annotations_list = item.annotations.list().annotations
        if len(annotations_list) < 1:
            return 1
        for annotation in annotations_list:
            if annotation.object_id is not None:
                current_object_id = int(annotation.object_id)
                if current_object_id > max_object_id:
                    max_object_id = current_object_id
        return max_object_id

    @staticmethod
    def video_snapshots_generator(item_id=None, item=None, frame_interval=30, image_ext="png"):
        futures = Videos._async_video_snapshots_generator(item_id=item_id,
                                                          item=item,
                                                          frame_interval=frame_interval,
                                                          image_ext=image_ext)
        loop = asyncio.new_event_loop()
        try:
            asyncio.set_event_loop(loop)
            return loop.run_until_complete(futures)
        finally:
            try:
                loop.run_until_complete(loop.shutdown_asyncgens())
            finally:
                asyncio.set_event_loop(None)
                loop.close()

    @staticmethod
    async def _async_video_snapshots_generator(item_id=None, item=None, frame_interval=30, image_ext="png"):
        """
        Create video-snapshots

        :param item_id: item id for the video
        :param item: item id for the video
        :param frame_interval: number of frames to take next snapshot
        :param image_ext: png/jpg
        :return: the uploaded items
        """
        if item_id is not None:
            item = dl.items.get(item_id=item_id)

        if item is None:
            raise ValueError('Missing input item (or item_id)')

        if not isinstance(frame_interval, int):
            raise AttributeError('frame_interval is mast to be integer')

        if "video" not in item.mimetype:
            raise AttributeError("Got {} file type but only video files are supported".format(item.mimetype))

        video_path = item.download()

        # Get the time for single frame from metadata (duration/# of frames)
        if 'system' in item.metadata and \
                'ffmpeg' in item.metadata['system'] and \
                'duration' in item.metadata['system']['ffmpeg'] and \
                'nb_frames' in item.metadata['system']['ffmpeg']:
            nb_frames = int(item.metadata["system"]["ffmpeg"]["nb_frames"])
            duration = float(item.metadata["system"]["ffmpeg"]["duration"])
            video_fps = duration / nb_frames
        else:
            try:
                import cv2
            except (ImportError, ModuleNotFoundError):
                logger.error(
                    'Import Error! Cant import cv2. '
                    'Annotations operations will be limited. import manually and fix errors')
                raise

            video = cv2.VideoCapture(video_path)
            video_fps = video.get(cv2.CAP_PROP_FPS)
            nb_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
            duration = video_fps * nb_frames

        images_path = Videos.disassemble(filepath=video_path, frame_interval=frame_interval, image_ext=image_ext)
        snapshots_items = list()
        try:
            # rename files
            images = []
            video_basename = os.path.basename(video_path)
            for f in os.listdir(images_path):
                images.append(f)
            for image in images:
                image_split_name, ext = os.path.splitext(image)
                try:
                    frame = int(image_split_name) * frame_interval
                    file_frame_name = "{}.frame.{}{}".format(video_basename, frame, ext)
                    full_path = os.path.join(images_path, file_frame_name)
                    os.rename(os.path.join(images_path, image),
                              full_path)
                except Exception as e:
                    logger.debug("Rename {} has been failed: {}".format(os.path.join(images_path, image), e))

                remote_path = os.path.join(os.path.split(item.filename)[0], "snapshots")
                remote_url = '/items/{}/snapshots'.format(item.id)
                to_upload = open(full_path, 'rb')
                try:
                    response = await item._client_api.upload_file_async(to_upload=to_upload,
                                                                        item_type='file',
                                                                        item_size=os.stat(full_path).st_size,
                                                                        remote_url=remote_url,
                                                                        uploaded_filename=file_frame_name,
                                                                        remote_path=remote_path)
                except Exception:
                    raise
                finally:
                    to_upload.close()
                if response.ok:
                    snapshots_items.append(item.from_json(response.json(), item._client_api))
                else:
                    raise dl.PlatformException(response)

            # classification tpe annotation creation for each file
            builder = item.annotations.builder()
            annotation_itemlinks = []

            max_object_id = Videos().get_max_object_id(item=item)
            for snapshot_item in snapshots_items:
                max_object_id += 1
                item_frame = snapshot_item.name.rsplit("frame.", 1)[1].split(".")[0]
                if item_frame.isnumeric():
                    item_time = int(item_frame) * video_fps
                else:
                    item_frame = item_time = 0

                snapshot_item.metadata["system"]["itemLinks"] = [{"type": "snapshotFrom",
                                                                  "itemId": item.id,
                                                                  "frame": item_frame,
                                                                  "time": item_time}]

                annotation_itemlinks.append({"type": "snapshotTo",
                                             "itemId": snapshot_item.id,
                                             "frame": item_frame,
                                             "time": item_time})

                snapshot_item.update(system_metadata=True)
                annotation_definition = dl.Classification(label="Snapshot")
                builder.add(annotation_definition=annotation_definition,
                            frame_num=int(item_frame),
                            end_frame_num=nb_frames if int(item_frame) + int(video_fps) > nb_frames else int(
                                item_frame) + int(video_fps),
                            start_time=item_time,
                            end_time=duration if item_time + 1 > duration else item_time + 1,
                            object_id=max_object_id)

            annotations = item.annotations.upload(annotations=builder)

            # update system metadata for annotations
            count = 0
            for annotation in annotations:
                annotation.metadata["system"]["itemLinks"] = [annotation_itemlinks[count]]
                count += 1

            annotations.update(system_metadata=True)
        except Exception as err:
            logger.exception(err)
        finally:
            if os.path.isdir(images_path):
                shutil.rmtree(images_path)
        return snapshots_items

    @staticmethod
    def disassemble(filepath, fps=None, frame_interval=None, loglevel='panic', image_ext='jpg'):
        """
        Disassemble video to images

        :param filepath: input video filepath
        :param fps: rate of disassemble. e.g if 1 frame per second fps is 1. if None all frames will be extracted
        :param frame_interval: take image every frame # (if exists function ignore fps)
        :param image_ext: png/jpg
        :param loglevel: ffmpeg loglevel
        :return:
        """
        try:
            import ffmpeg
        except ImportError:
            logger.error(
                'Import Error! Cant import ffmpeg. '
                'Annotations operations will be limited. import manually and fix errors')
            raise
        # get video information
        video_props = Videos.get_info(filepath)
        if 'system' in video_props and \
                'nb_frames' in video_props['system'][0]:
            nb_frames = video_props['streams'][0]['nb_frames']
        else:
            try:
                import cv2
            except (ImportError, ModuleNotFoundError):
                logger.error(
                    'Import Error! Cant import cv2. '
                    'Annotations operations will be limited. import manually and fix errors')
                raise
            nb_frames = int(cv2.VideoCapture(filepath).get(cv2.CAP_PROP_FRAME_COUNT))

        if not os.path.isfile(filepath):
            raise IOError('File doesnt exists: {}'.format(filepath))
        basename, ext = os.path.splitext(filepath)
        # create folder for the frames
        if os.path.exists(basename):
            shutil.rmtree(basename)

        os.makedirs(basename, exist_ok=True)

        if fps is None:
            try:
                fps = eval(video_props['streams'][0]['avg_frame_rate'])
            except ZeroDivisionError:
                fps = 0
        num_of_zeros = len(str(nb_frames))
        # format the output filename
        output_regex = os.path.join(basename, '%0{}d.{}'.format(num_of_zeros, image_ext))

        try:
            if frame_interval is not None:
                frame_number = 0
                select = ""
                while frame_number < nb_frames:
                    if select != "":
                        select += '+'
                    select += 'eq(n\\,{})'.format(frame_number)
                    frame_number += frame_interval
                stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_regex,
                                                                                 **{'start_number': '0',
                                                                                    'vf': 'select=\'{}'.format(select),
                                                                                    'vsync': 'vfr'})
            else:
                stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_regex,
                                                                                 **{'start_number': '0',
                                                                                    'r': str(fps)})

            ffmpeg.overwrite_output(stream).run()
        except Exception:
            logger.error('ffmpeg error in disassemble:')
            raise
        return basename

    @staticmethod
    def reencode(filepath, loglevel='panic'):
        """
        Re-encode video as mp4, remove start offset and set bframes to 0

        :param filepath: input video file
        :param loglevel: ffmpeg loglevel
        :return:
        """
        try:
            import ffmpeg
        except ImportError:
            logger.error(
                'Import Error! Cant import ffmpeg. '
                'Annotations operations will be limited. import manually and fix errors')
            raise
        if not os.path.isfile(filepath):
            raise IOError('File doesnt exists: {}'.format(filepath))
        # re encode video without b frame and as mp4
        basename, ext = os.path.splitext(filepath)
        output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))
        if not os.path.isdir(os.path.dirname(output_filepath)):
            os.makedirs(os.path.dirname(output_filepath))
        try:
            stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,
                                                                             **{'x264opts': 'bframes=0',
                                                                                'f': 'mp4'})
            ffmpeg.overwrite_output(stream).run()
        except Exception as e:
            logger.exception('ffmpeg error in disassemble:')
            raise

        output_probe = Videos.get_info(output_filepath)
        start_time = eval(output_probe['streams'][0]['start_time'])
        fps = eval(output_probe['streams'][0]['avg_frame_rate'])
        has_b_frames = output_probe['streams'][0]['has_b_frames']
        start_frame = fps * start_time
        if start_time != 0:
            logger.warning('Video start_time is not 0!')
        if has_b_frames != 0:
            logger.warning('Video still has b frames!')
        return output_filepath

    @staticmethod
    def split_and_upload(filepath,
                         # upload parameters
                         project_name=None, project_id=None, dataset_name=None, dataset_id=None, remote_path=None,
                         # split parameters
                         split_seconds=None, split_chunks=None, split_pairs=None,
                         loglevel='panic'):
        """
        Split video to chunks and upload to platform

        :param filepath: input video file
        :param project_name:
        :param project_id:
        :param dataset_name:
        :param dataset_id:
        :param remote_path:
        :param split_seconds: split by seconds per chunk. each chunk's length will be this in seconds
        :param split_chunks: split by number of chunks.
        :param split_pairs: a list od (start, stop) segments to split in seconds . e.g [(0,400), (600,800)]
        :param loglevel: ffmpeg loglevel
        :return:
        """
        try:
            import ffmpeg
        except ImportError:
            logger.error(
                'Import Error! Cant import ffmpeg. '
                'Annotations operations will be limited. import manually and fix errors')
            raise
        # https://www.ffmpeg.org/ffmpeg-formats.html#Examples-9

        if not os.path.isfile(filepath):
            raise IOError('File doesnt exists: {}'.format(filepath))
        logger.info('Extracting video information...')
        # call to ffmpeg to get frame rate
        probe = Videos.get_info(filepath)
        fps = eval(probe['streams'][0]['avg_frame_rate'])
        n_frames = eval(probe['streams'][0]['nb_frames'])
        video_length = eval(probe['streams'][0]['duration'])
        logger.info('Video frame rate: {}[fps]'.format(fps))
        logger.info('Video number of frames: {}'.format(n_frames))
        logger.info('Video length in seconds: {}[s]'.format(video_length))

        # check split params and calc split params for ffmpeg
        if split_seconds is not None:
            # split by seconds
            split_length = split_seconds
            if split_length <= 0:
                raise ValueError('"split_length" can\'t be 0')
            split_count = int(np.ceil(video_length / split_length))
            list_frames_to_split = [fps * split_length * n for n in range(1, split_count)]
        elif split_chunks is not None:
            # split for known number of chunks
            split_count = split_chunks
            if split_chunks <= 0:
                raise ValueError('"split_chunks" size can\'t be 0')
            split_length = int(np.ceil(video_length / split_chunks))
            list_frames_to_split = [fps * split_length * n for n in range(1, split_count)]
        elif split_pairs is not None:
            if not isinstance(split_pairs, list):
                raise ValueError('"split_times" must be a list of tuples to split at.')
            if not (isinstance(split_pairs[0], list) or isinstance(split_pairs[0], tuple)):
                raise ValueError('"split_times" must be a list of tuples to split at.')
            list_frames_to_split = [fps * split_second for segment in split_pairs for split_second in segment]
            split_count = len(list_frames_to_split)
        else:
            raise ValueError('Must input one split option ("split_chunks", "split_time" or "split_pairs")')
        if split_count == 1:
            raise ValueError('Video length is less than the target split length.')
        # to integers
        list_frames_to_split = [int(i) for i in list_frames_to_split]
        # remove 0 if in the first segmetn
        if list_frames_to_split[0] == 0:
            list_frames_to_split.pop(0)
        # add last frames if not exists
        if list_frames_to_split[-1] != n_frames:
            list_frames_to_split = list_frames_to_split + [n_frames]
        logger.info('Splitting to %d chunks' % split_count)

        basename, ext = os.path.splitext(filepath)
        output_regex = os.path.join(basename, '%%03d.mp4')
        # create folder
        if not os.path.exists(basename):
            os.makedirs(basename, exist_ok=True)
        # run ffmpeg
        try:
            stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_regex,
                                                                             **{'x264opts': 'bframes=0',
                                                                                'f': 'segment',
                                                                                'reset_timestamps': '1',
                                                                                'map': '0',
                                                                                'segment_frames': ','.join(
                                                                                    [str(i) for i in
                                                                                     list_frames_to_split])
                                                                                })
            ffmpeg.overwrite_output(stream).run(capture_stdout=True)
        except Exception:
            logger.exception('ffmpeg error in disassemble:')
            raise

        # split_cmd = 'ffmpeg -y -i "%s" -b 0 -f mp4 -reset_timestamps 1 -map 0 -f segment -segment_frames %s "%s"' % (
        #     filepath, ','.join([str(int(i)) for i in list_frames_to_split]), output_regex)
        # logger.info('About to run: %s' % split_cmd)
        # subprocess.check_call(shlex.split(split_cmd), universal_newlines=True)

        # rename
        list_frames_to_split = [0] + list_frames_to_split
        filenames = list()
        for n in range(split_count):
            old_filename = output_regex.replace('%03d', '%03d' % n)
            new_filename = output_regex.replace('%03d', '%s__%s' %
                                                (time.strftime('%H_%M_%S', time.gmtime(list_frames_to_split[n] / fps)),
                                                 time.strftime('%H_%M_%S',
                                                               time.gmtime(list_frames_to_split[n + 1] / fps))))
            filenames.append(new_filename)
            # rename to informative name
            if os.path.isfile(new_filename):
                logger.warning('File already exists. Overwriting!: {}'.format(new_filename))
                os.remove(new_filename)
            os.rename(old_filename, new_filename)
            # check if in pairs, if not - delete
            if split_pairs is not None:
                start_frames = [pair[0] for pair in split_pairs]
                end_frames = [pair[1] for pair in split_pairs]
                if (list_frames_to_split[n] // fps) in start_frames and (
                        list_frames_to_split[n + 1] // fps) in end_frames:
                    # keep video
                    pass
                else:
                    os.remove(new_filename)
        Videos.upload_to_platform(project_name=project_name,
                                  project_id=project_id,
                                  dataset_name=dataset_name,
                                  dataset_id=dataset_id,
                                  remote_path=remote_path,
                                  local_path=basename)

    @staticmethod
    def upload_to_platform(project_name=None, project_id=None, dataset_name=None, dataset_id=None,
                           local_path=None, remote_path=None):

        import dtlpy as dlp
        if project_id is not None or project_name is not None:
            project = dlp.projects.get(project_name=project_name, project_id=project_id)
            dataset = project.get(dataset_name=dataset_name, dataset_id=dataset_id)
            dataset.items.upload(dataset_name=dataset_name,
                                 dataset_id=dataset_id,
                                 local_path=local_path,
                                 remote_path=remote_path,
                                 file_types=['.mp4'])
        else:
            dataset = dlp.datasets.get(dataset_name=dataset_name, dataset_id=dataset_id)
            dataset.items.upload(local_path=local_path,
                                 remote_path=remote_path,
                                 file_types=['.mp4'])
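For orientation only, a minimal usage sketch of the Videos utilities shown above (not taken from the package; the item id and file path are placeholders, and an authenticated dtlpy session plus the ffmpeg/ffmpeg-python and OpenCV optional dependencies are assumed):

import dtlpy as dl
from dtlpy.utilities.videos.videos import Videos

# placeholder id -- any video item already uploaded to a Dataloop dataset
item = dl.items.get(item_id='<video-item-id>')

# take a snapshot every 30 frames and upload the frames as items next to the source video
snapshots = Videos.video_snapshots_generator(item=item, frame_interval=30, image_ext='png')

# or disassemble a local video file into a folder of frame images on disk
frames_dir = Videos.disassemble(filepath='/path/to/video.mp4', frame_interval=30)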