supervisely 6.73.410__py3-none-any.whl → 6.73.470__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/__init__.py +136 -1
- supervisely/_utils.py +81 -0
- supervisely/annotation/json_geometries_map.py +2 -0
- supervisely/annotation/label.py +80 -3
- supervisely/api/annotation_api.py +9 -9
- supervisely/api/api.py +67 -43
- supervisely/api/app_api.py +72 -5
- supervisely/api/dataset_api.py +108 -33
- supervisely/api/entity_annotation/figure_api.py +113 -49
- supervisely/api/image_api.py +82 -0
- supervisely/api/module_api.py +10 -0
- supervisely/api/nn/deploy_api.py +15 -9
- supervisely/api/nn/ecosystem_models_api.py +201 -0
- supervisely/api/nn/neural_network_api.py +12 -3
- supervisely/api/pointcloud/pointcloud_api.py +38 -0
- supervisely/api/pointcloud/pointcloud_episode_annotation_api.py +3 -0
- supervisely/api/project_api.py +213 -6
- supervisely/api/task_api.py +11 -1
- supervisely/api/video/video_annotation_api.py +4 -2
- supervisely/api/video/video_api.py +79 -1
- supervisely/api/video/video_figure_api.py +24 -11
- supervisely/api/volume/volume_api.py +38 -0
- supervisely/app/__init__.py +1 -1
- supervisely/app/content.py +14 -6
- supervisely/app/fastapi/__init__.py +1 -0
- supervisely/app/fastapi/custom_static_files.py +1 -1
- supervisely/app/fastapi/multi_user.py +88 -0
- supervisely/app/fastapi/subapp.py +175 -42
- supervisely/app/fastapi/templating.py +1 -1
- supervisely/app/fastapi/websocket.py +77 -9
- supervisely/app/singleton.py +21 -0
- supervisely/app/v1/app_service.py +18 -2
- supervisely/app/v1/constants.py +7 -1
- supervisely/app/widgets/__init__.py +11 -1
- supervisely/app/widgets/agent_selector/template.html +1 -0
- supervisely/app/widgets/card/card.py +20 -0
- supervisely/app/widgets/dataset_thumbnail/dataset_thumbnail.py +11 -2
- supervisely/app/widgets/dataset_thumbnail/template.html +3 -1
- supervisely/app/widgets/deploy_model/deploy_model.py +750 -0
- supervisely/app/widgets/dialog/dialog.py +12 -0
- supervisely/app/widgets/dialog/template.html +2 -1
- supervisely/app/widgets/dropdown_checkbox_selector/__init__.py +0 -0
- supervisely/app/widgets/dropdown_checkbox_selector/dropdown_checkbox_selector.py +87 -0
- supervisely/app/widgets/dropdown_checkbox_selector/template.html +12 -0
- supervisely/app/widgets/ecosystem_model_selector/__init__.py +0 -0
- supervisely/app/widgets/ecosystem_model_selector/ecosystem_model_selector.py +195 -0
- supervisely/app/widgets/experiment_selector/experiment_selector.py +454 -263
- supervisely/app/widgets/fast_table/fast_table.py +713 -126
- supervisely/app/widgets/fast_table/script.js +492 -95
- supervisely/app/widgets/fast_table/style.css +54 -0
- supervisely/app/widgets/fast_table/template.html +45 -5
- supervisely/app/widgets/heatmap/__init__.py +0 -0
- supervisely/app/widgets/heatmap/heatmap.py +523 -0
- supervisely/app/widgets/heatmap/script.js +378 -0
- supervisely/app/widgets/heatmap/style.css +227 -0
- supervisely/app/widgets/heatmap/template.html +21 -0
- supervisely/app/widgets/input_tag/input_tag.py +102 -15
- supervisely/app/widgets/input_tag_list/__init__.py +0 -0
- supervisely/app/widgets/input_tag_list/input_tag_list.py +274 -0
- supervisely/app/widgets/input_tag_list/template.html +70 -0
- supervisely/app/widgets/radio_table/radio_table.py +10 -2
- supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
- supervisely/app/widgets/radio_tabs/template.html +1 -0
- supervisely/app/widgets/select/select.py +6 -4
- supervisely/app/widgets/select_dataset/select_dataset.py +6 -0
- supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +83 -7
- supervisely/app/widgets/table/table.py +68 -13
- supervisely/app/widgets/tabs/tabs.py +22 -6
- supervisely/app/widgets/tabs/template.html +5 -1
- supervisely/app/widgets/transfer/style.css +3 -0
- supervisely/app/widgets/transfer/template.html +3 -1
- supervisely/app/widgets/transfer/transfer.py +48 -45
- supervisely/app/widgets/tree_select/tree_select.py +2 -0
- supervisely/convert/image/csv/csv_converter.py +24 -15
- supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +43 -41
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +75 -51
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +137 -124
- supervisely/convert/video/video_converter.py +2 -2
- supervisely/geometry/polyline_3d.py +110 -0
- supervisely/io/env.py +161 -1
- supervisely/nn/artifacts/__init__.py +1 -1
- supervisely/nn/artifacts/artifacts.py +10 -2
- supervisely/nn/artifacts/detectron2.py +1 -0
- supervisely/nn/artifacts/hrda.py +1 -0
- supervisely/nn/artifacts/mmclassification.py +20 -0
- supervisely/nn/artifacts/mmdetection.py +5 -3
- supervisely/nn/artifacts/mmsegmentation.py +1 -0
- supervisely/nn/artifacts/ritm.py +1 -0
- supervisely/nn/artifacts/rtdetr.py +1 -0
- supervisely/nn/artifacts/unet.py +1 -0
- supervisely/nn/artifacts/utils.py +3 -0
- supervisely/nn/artifacts/yolov5.py +2 -0
- supervisely/nn/artifacts/yolov8.py +1 -0
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +18 -18
- supervisely/nn/experiments.py +9 -0
- supervisely/nn/inference/cache.py +37 -17
- supervisely/nn/inference/gui/serving_gui_template.py +39 -13
- supervisely/nn/inference/inference.py +953 -211
- supervisely/nn/inference/inference_request.py +15 -8
- supervisely/nn/inference/instance_segmentation/instance_segmentation.py +1 -0
- supervisely/nn/inference/object_detection/object_detection.py +1 -0
- supervisely/nn/inference/predict_app/__init__.py +0 -0
- supervisely/nn/inference/predict_app/gui/__init__.py +0 -0
- supervisely/nn/inference/predict_app/gui/classes_selector.py +160 -0
- supervisely/nn/inference/predict_app/gui/gui.py +915 -0
- supervisely/nn/inference/predict_app/gui/input_selector.py +344 -0
- supervisely/nn/inference/predict_app/gui/model_selector.py +77 -0
- supervisely/nn/inference/predict_app/gui/output_selector.py +179 -0
- supervisely/nn/inference/predict_app/gui/preview.py +93 -0
- supervisely/nn/inference/predict_app/gui/settings_selector.py +881 -0
- supervisely/nn/inference/predict_app/gui/tags_selector.py +110 -0
- supervisely/nn/inference/predict_app/gui/utils.py +399 -0
- supervisely/nn/inference/predict_app/predict_app.py +176 -0
- supervisely/nn/inference/session.py +47 -39
- supervisely/nn/inference/tracking/bbox_tracking.py +5 -1
- supervisely/nn/inference/tracking/point_tracking.py +5 -1
- supervisely/nn/inference/tracking/tracker_interface.py +4 -0
- supervisely/nn/inference/uploader.py +9 -5
- supervisely/nn/model/model_api.py +44 -22
- supervisely/nn/model/prediction.py +15 -1
- supervisely/nn/model/prediction_session.py +70 -14
- supervisely/nn/prediction_dto.py +7 -0
- supervisely/nn/tracker/__init__.py +6 -8
- supervisely/nn/tracker/base_tracker.py +54 -0
- supervisely/nn/tracker/botsort/__init__.py +1 -0
- supervisely/nn/tracker/botsort/botsort_config.yaml +30 -0
- supervisely/nn/tracker/botsort/osnet_reid/__init__.py +0 -0
- supervisely/nn/tracker/botsort/osnet_reid/osnet.py +566 -0
- supervisely/nn/tracker/botsort/osnet_reid/osnet_reid_interface.py +88 -0
- supervisely/nn/tracker/botsort/tracker/__init__.py +0 -0
- supervisely/nn/tracker/{bot_sort → botsort/tracker}/basetrack.py +1 -2
- supervisely/nn/tracker/{utils → botsort/tracker}/gmc.py +51 -59
- supervisely/nn/tracker/{deep_sort/deep_sort → botsort/tracker}/kalman_filter.py +71 -33
- supervisely/nn/tracker/botsort/tracker/matching.py +202 -0
- supervisely/nn/tracker/{bot_sort/bot_sort.py → botsort/tracker/mc_bot_sort.py} +68 -81
- supervisely/nn/tracker/botsort_tracker.py +273 -0
- supervisely/nn/tracker/calculate_metrics.py +264 -0
- supervisely/nn/tracker/utils.py +273 -0
- supervisely/nn/tracker/visualize.py +520 -0
- supervisely/nn/training/gui/gui.py +152 -49
- supervisely/nn/training/gui/hyperparameters_selector.py +1 -1
- supervisely/nn/training/gui/model_selector.py +8 -6
- supervisely/nn/training/gui/train_val_splits_selector.py +144 -71
- supervisely/nn/training/gui/training_artifacts.py +3 -1
- supervisely/nn/training/train_app.py +225 -46
- supervisely/project/pointcloud_episode_project.py +12 -8
- supervisely/project/pointcloud_project.py +12 -8
- supervisely/project/project.py +221 -75
- supervisely/template/experiment/experiment.html.jinja +105 -55
- supervisely/template/experiment/experiment_generator.py +258 -112
- supervisely/template/experiment/header.html.jinja +31 -13
- supervisely/template/experiment/sly-style.css +7 -2
- supervisely/versions.json +3 -1
- supervisely/video/sampling.py +42 -20
- supervisely/video/video.py +41 -12
- supervisely/video_annotation/video_figure.py +38 -4
- supervisely/volume/stl_converter.py +2 -0
- supervisely/worker_api/agent_rpc.py +24 -1
- supervisely/worker_api/rpc_servicer.py +31 -7
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/METADATA +22 -14
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/RECORD +167 -148
- supervisely_lib/__init__.py +6 -1
- supervisely/app/widgets/experiment_selector/style.css +0 -27
- supervisely/app/widgets/experiment_selector/template.html +0 -61
- supervisely/nn/tracker/bot_sort/__init__.py +0 -21
- supervisely/nn/tracker/bot_sort/fast_reid_interface.py +0 -152
- supervisely/nn/tracker/bot_sort/matching.py +0 -127
- supervisely/nn/tracker/bot_sort/sly_tracker.py +0 -401
- supervisely/nn/tracker/deep_sort/__init__.py +0 -6
- supervisely/nn/tracker/deep_sort/deep_sort/__init__.py +0 -1
- supervisely/nn/tracker/deep_sort/deep_sort/detection.py +0 -49
- supervisely/nn/tracker/deep_sort/deep_sort/iou_matching.py +0 -81
- supervisely/nn/tracker/deep_sort/deep_sort/linear_assignment.py +0 -202
- supervisely/nn/tracker/deep_sort/deep_sort/nn_matching.py +0 -176
- supervisely/nn/tracker/deep_sort/deep_sort/track.py +0 -166
- supervisely/nn/tracker/deep_sort/deep_sort/tracker.py +0 -145
- supervisely/nn/tracker/deep_sort/deep_sort.py +0 -301
- supervisely/nn/tracker/deep_sort/generate_clip_detections.py +0 -90
- supervisely/nn/tracker/deep_sort/preprocessing.py +0 -70
- supervisely/nn/tracker/deep_sort/sly_tracker.py +0 -273
- supervisely/nn/tracker/tracker.py +0 -285
- supervisely/nn/tracker/utils/kalman_filter.py +0 -492
- supervisely/nn/tracking/__init__.py +0 -1
- supervisely/nn/tracking/boxmot.py +0 -114
- supervisely/nn/tracking/tracking.py +0 -24
- /supervisely/{nn/tracker/utils → app/widgets/deploy_model}/__init__.py +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/LICENSE +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/WHEEL +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/top_level.txt +0 -0
supervisely/template/experiment/header.html.jinja
CHANGED

@@ -60,9 +60,11 @@
   :selectContext="true"
   :disabled="false"
   :autoRun="false"
-  :
+  :state="{ 'trainTaskId': {{ experiment.task_id }}, 'run': false, 'stopAfterRun': false}"
+  :moduleId="{{ resources.apps.predict.module_id }}"
   :openInNewWindow="true"
-  :command="command"
+  :command="command"
+>
   <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAAAsTAAALEwEAmpwYAAAA50lEQVR4nO3YOw6CQBSF4bHQwIJ9VEYL3JisQtgFBQ3JbyaimcoMxuEOer6aCedyCAnXORH5PUAJnIEWGLAzjBlOPtOU8DX5qYEiZgD/5HN1jBnAV/a0BdbOCLAGdkGeW8yh8J3fzJL0DZ8hyDPEHHhxmWBKJg2QAGrAGGrAGGrAGGrAGGrAGGrAGGrAGGrAGP/WwLD0n/o2OLC3HIJH+MPUtYpf4y16sVVmulq8Rq0WxyEKPy3QZLDcbcYsceE/Ed7xm9fORgNYUwNzAi5An+CL0wNV6vAroCOdLukA4xDVYhsQEZedOy9lmdD5aZkaAAAAAElFTkSuQmCC" alt="monitor--v1">
   <span>Apply Model</span>
 </sly-iw-launch-button>

@@ -75,6 +77,7 @@
 <h2>Training Info</h2>
 <a href="{{ experiment.training.session.url }}" target="_blank">Session <i class="zmdi zmdi-arrow-right-top" /></a>
 <!-- Launch Logs Button -->
+{% if experiment.training.logs.path %}
 <sly-iw-launch-button
   style=""
   :disabled="false"

@@ -86,6 +89,7 @@
   :command="command">
   <span>Logs</span> <i class="zmdi zmdi-arrow-right-top" />
 </sly-iw-launch-button>
+{% endif %}
 </div>
 <ul>
   <li><strong>Date:</strong> {{ experiment.date }}</li>

@@ -93,7 +97,7 @@
   <li><strong>Model:</strong> {{ experiment.model.name }}</li>
   {% if experiment.model.base_checkpoint.url and not experiment.model.base_checkpoint.path %}
   <!-- Pretrained Checkpoint -> download link-->
-  <li><strong>Base checkpoint:</strong> <a href="{{ experiment.model.base_checkpoint.url }}" target="_blank" download="{{ experiment.model.base_checkpoint.name }}">{{ experiment.model.base_checkpoint.name }}
+  <li><strong>Base checkpoint:</strong> <a href="{{ experiment.model.base_checkpoint.url }}" target="_blank" download="{{ experiment.model.base_checkpoint.name }}">{{ experiment.model.base_checkpoint.name }} <i class="zmdi zmdi-download"></i></a></li>
   {% elif experiment.model.base_checkpoint.path %}
   <!-- Custom Checkpoint -> TeamFiles Link -->
   <li><strong>Base checkpoint:</strong> <a href="{{ experiment.model.base_checkpoint.path }}" target="_blank">{{ experiment.model.base_checkpoint.name }}</a></li>

@@ -114,8 +118,19 @@
 <ul>
   <li><strong>Project:</strong> {{ experiment.project.name }}</li>
   <li><strong>Classes:</strong> ({{ experiment.project.classes.count }}) {% for class_name in experiment.project.classes.names.short_list %}<span class="class-tag">{{ class_name }}</span> {% endfor %}</li>
-
-
+
+  {% if experiment.project.splits.train.url %}
+  <li><strong>Training:</strong> <a href='{{ experiment.project.splits.train.url }}' target="_blank">{{ experiment.project.splits.train.size }} {{ experiment.project.type }}</a></li>
+  {% else %}
+  <li><strong>Training:</strong>{{ experiment.project.splits.train.size }} {{ experiment.project.type }}</li>
+  {% endif %}
+
+  {% if experiment.project.splits.val.url %}
+  <li><strong>Validation:</strong> <a href='{{ experiment.project.splits.val.url }}' target="_blank">{{ experiment.project.splits.val.size }} {{ experiment.project.type }}</a></li>
+  {% else %}
+  <li><strong>Validation:</strong>{{ experiment.project.splits.val.size }} {{ experiment.project.type }}</li>
+  {% endif %}
+
 </ul>
 </div>
 {% if experiment.training.evaluation.id %}

@@ -140,15 +155,18 @@
 </div>
 <div class="experiment-info-subheader-footer-menu">
   {% if experiment.training.evaluation.id %}
-  <a :href="`${
-  <a :href="`${
+  <a :href="`${location.pathname}#predictions`" class="active">Predictions</a>
+  <a :href="`${location.pathname}#evaluation-2`">Evaluation</a>
   {% endif %}
   {% if not experiment.training.evaluation.id %}
-  <a :href="`${
+  <a :href="`${location.pathname}#evaluation-2`" class="active">Evaluation</a>
+  {% endif %}
+  {% if widgets.training_plots %}
+  <a :href="`${location.pathname}#training-plots`">Training Plots</a>
   {% endif %}
-  <a :href="`${
-  <a :href="`${
-  <a :href="`${
-  <a :href="`${
+  <a :href="`${location.pathname}#artifacts`">Artifacts</a>
+  <a :href="`${location.pathname}#classes`">Classes</a>
+  <a :href="`${location.pathname}#hyperparameters`">Hyperparameters</a>
+  <a :href="`${location.pathname}#api-integration-amp-deployment`">API Integration & Deployment</a>
 </div>
-</div>
+</div>

supervisely/template/experiment/sly-style.css
CHANGED

@@ -314,7 +314,7 @@
   align-items: center;
 }

-.experiment-body .experiment-info-header .experiment-info-buttons a {
+.experiment-body .experiment-info-header .experiment-info-buttons a, .training-plots-button {
   display: flex;
   gap: 5px;
   align-items: center;

@@ -327,7 +327,8 @@
   cursor: pointer;
 }

-
+
+.experiment-body .experiment-info-header .experiment-info-buttons a.primary, .training-plots-button {
   background: black;
   color: white;
 }

@@ -338,6 +339,10 @@
   background: transparent;
 }

+.experiment-body .training-plots-button {
+  display: inline-block;
+}
+
 /*********************** Metrics ***********************/

 .experiment-body .experiment-metric {
supervisely/versions.json
CHANGED
supervisely/video/sampling.py
CHANGED
@@ -94,10 +94,12 @@ def _frame_to_annotation(frame: Frame, video_annotation: VideoAnnotation) -> Ann

 def _upload_annotations(api: Api, image_ids, frame_indices, video_annotation: VideoAnnotation):
     anns = []
-    for
+    for frame_index in frame_indices:
         frame = video_annotation.frames.get(frame_index, None)
         if frame is not None:
             anns.append(_frame_to_annotation(frame, video_annotation))
+        else:
+            anns.append(Annotation(video_annotation.img_size))
     api.annotation.upload_anns(image_ids, anns=anns)


@@ -223,28 +225,48 @@ def sample_video(
         progress.miniters = 1
         progress.refresh()

-
-
-
-    for
+    batch_size = 50
+    try:
+        with VideoFrameReader(video_path, frame_indices) as reader:
+            for batch_indices in batched_iter(frame_indices, batch_size):
+                batch_indices_list = list(batch_indices)
+                frames = reader.read_batch(batch_indices_list)
+
                 if resize:
-
+                    resized_frames = []
+                    for frame in frames:
+                        resized_frame = cv2.resize(
+                            frame,
+                            (resize[1], resize[0]),  # (width, height)
+                            interpolation=cv2.INTER_LINEAR,
+                        )
+                        resized_frames.append(resized_frame)
+                    frames = resized_frames
+
+                image_ids = _upload_frames(
+                    api=api,
+                    frames=frames,
+                    video_name=video_info.name,
+                    video_frames_count=video_info.frames_count,
+                    indices=batch_indices_list,
+                    dataset_id=dst_dataset_info.id,
+                    sample_info=sample_info,
+                    context=context,
+                    copy_annotations=copy_annotations,
+                    video_annotation=video_annotation,
+                )

-
-
-            frames=frames,
-            video_name=video_info.name,
-            video_frames_count=video_info.frames_count,
-            indices=indices,
-            dataset_id=dst_dataset_info.id,
-            sample_info=sample_info,
-            context=context,
-            copy_annotations=copy_annotations,
-            video_annotation=video_annotation,
-        )
+                if progress is not None:
+                    progress.update(len(image_ids))

-
-
+                # Free memory after each batch
+                del frames
+                if resize:
+                    del resized_frames
+    finally:
+        import os
+        if os.path.exists(video_path):
+            os.remove(video_path)


 def _get_or_create_dst_dataset(
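Note: the reworked sample_video loop above reads frames in fixed-size batches (batch_size = 50) and frees each batch after upload instead of decoding the whole video at once. A minimal standalone sketch of the same chunking idea; the batched_iter helper below is an illustrative stand-in written for this note, not the SDK's own implementation:

from itertools import islice
from typing import Iterable, Iterator, List


def batched_iter(seq: Iterable[int], batch_size: int) -> Iterator[List[int]]:
    # Yield consecutive chunks of at most batch_size items from seq.
    it = iter(seq)
    while True:
        chunk = list(islice(it, batch_size))
        if not chunk:
            return
        yield chunk


# Process 100 sampled frame indices in batches of 50.
for batch in batched_iter(range(0, 500, 5), 50):
    print(f"processing {len(batch)} frames, first index {batch[0]}")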
supervisely/video/video.py
CHANGED
@@ -4,7 +4,7 @@
 from __future__ import annotations

 import os
-from typing import Dict, Generator, List, Optional, Tuple
+from typing import Dict, Generator, Iterable, List, Optional, Tuple

 import cv2
 import numpy as np

@@ -18,7 +18,7 @@ ALLOWED_VIDEO_EXTENSIONS = [".avi", ".mp4", ".3gp", ".flv", ".webm", ".wmv", ".m


 _SUPPORTED_CONTAINERS = {"mp4", "webm", "ogg", "ogv"}
-_SUPPORTED_CODECS = {"h264", "vp8", "vp9"}
+_SUPPORTED_CODECS = {"h264", "vp8", "vp9", "h265", "hevc", "av1"}


 class VideoExtensionError(Exception):

@@ -537,11 +537,9 @@ class VideoFrameReader:
         try:
             import decord

-            self.vr = decord.VideoReader(str(self.video_path))
+            self.vr = decord.VideoReader(str(self.video_path), num_threads=1)
         except ImportError:
-            default_logger.debug(
-                "Decord is not installed. Falling back to OpenCV for video reading."
-            )
+            default_logger.debug("Decord is not installed. Falling back to OpenCV for video reading.")
             self.cap = cv2.VideoCapture(str(self.video_path))

     def close(self):

@@ -562,24 +560,30 @@ class VideoFrameReader:
     def __del__(self):
         self.close()

-    def iterate_frames(self, frame_indexes: List[int] = None) -> Generator[np.ndarray, None, None]:
+    def iterate_frames(self, frame_indexes: Optional[List[int]] = None) -> Generator[np.ndarray, None, None]:
         self._ensure_initialized()
         if frame_indexes is None:
             frame_indexes = self.frame_indexes
         if self.vr is not None:
+            # Decord
             if frame_indexes is None:
                 frame_indexes = range(len(self.vr))
-            for
-
-            yield
+            for idx in frame_indexes:
+                arr = self.vr[idx].asnumpy()
+                yield arr
+                del arr
         else:
+            # OpenCV fallback
             if frame_indexes is None:
                 frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
                 frame_indexes = range(frame_count)
             for frame_index in frame_indexes:
-                if 1
+                if 1 < frame_index - self.prev_idx < 20:
                     while self.prev_idx < frame_index - 1:
-                        self.cap.read()
+                        ok, _ = self.cap.read()
+                        if not ok:
+                            break
+                        self.prev_idx += 1
                 if frame_index != self.prev_idx + 1:
                     self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
                 ret, frame = self.cap.read()

@@ -588,6 +592,17 @@ class VideoFrameReader:
                 yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                 self.prev_idx = frame_index

+    def read_batch(self, frame_indexes: List[int]) -> List[np.ndarray]:
+        self._ensure_initialized()
+        if self.vr is not None:
+            batch_nd = self.vr.get_batch(frame_indexes)
+            batch_np = batch_nd.asnumpy()
+            frames = [batch_np[i].copy() for i in range(batch_np.shape[0])]
+            del batch_np
+            return frames
+        else:
+            return list(self.iterate_frames(frame_indexes))
+
     def read_frames(self, frame_indexes: List[int] = None) -> List[np.ndarray]:
         return list(self.iterate_frames(frame_indexes))

@@ -625,3 +640,17 @@ class VideoFrameReader:
             return self.vr.get_avg_fps()
         else:
             return int(self.cap.get(cv2.CAP_PROP_FPS))
+
+
+def create_from_frames(frames: Iterable[np.ndarray], output_path: str, fps: int = 30) -> None:
+    video_writer = None
+    for frame in frames:
+        if video_writer is None:
+            height, width, _ = frame.shape
+            fourcc = cv2.VideoWriter.fourcc(*"mp4v")
+            video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+        if frame.dtype != np.uint8:
+            frame = (frame * 255).astype(np.uint8) if frame.max() <= 1.0 else frame.astype(np.uint8)
+
+        video_writer.write(frame)
+    video_writer.release()
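The diff above adds VideoFrameReader.read_batch (a single Decord get_batch call with an OpenCV fallback) and a create_from_frames writer. A minimal usage sketch, assuming a local "input.mp4" exists and that create_from_frames is exposed at module level as reconstructed here:

from supervisely.video.video import VideoFrameReader, create_from_frames

frame_indexes = list(range(0, 100, 10))
with VideoFrameReader("input.mp4", frame_indexes) as reader:
    # One decode call per batch with Decord, or sequential reads via the OpenCV fallback.
    frames = reader.read_batch(frame_indexes)

# Re-encode the sampled frames as an mp4v-coded clip at 5 fps.
create_from_frames(frames, "sampled.mp4", fps=5)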
supervisely/video_annotation/video_figure.py
CHANGED

@@ -24,6 +24,8 @@ from supervisely.video_annotation.key_id_map import KeyIdMap
 from supervisely.video_annotation.video_object import VideoObject
 from supervisely.video_annotation.video_object_collection import VideoObjectCollection

+from supervisely.annotation.label import LabelingStatus
+

 class OutOfImageBoundsException(Exception):
     pass

@@ -55,6 +57,8 @@ class VideoFigure:
     :type smart_tool_input: dict, optional
     :param priority: Priority of the figure (position of the figure relative to other overlapping or underlying figures).
     :type priority: int, optional
+    :param status: Sets labeling status. Shows how label was created and corrected.
+    :type status: LabelingStatus, optional
     :Usage example:

      .. code-block:: python

@@ -86,7 +90,9 @@ class VideoFigure:
        #     ],
        #     "interior": []
        #   }
-       # }
+       # },
+       # "nnCreated": false,
+       # "nnUpdated": false
        # }
        """

@@ -103,6 +109,7 @@
         track_id: Optional[str] = None,
         smart_tool_input: Optional[Dict] = None,
         priority: Optional[int] = None,
+        status: Optional[LabelingStatus] = None,
     ):
         self._video_object = video_object
         self._set_geometry_inplace(geometry)

@@ -116,6 +123,11 @@
         self._smart_tool_input = smart_tool_input
         self._priority = priority

+        if status is None:
+            status = LabelingStatus.MANUAL
+        self._status = status
+        self._nn_created, self._nn_updated = LabelingStatus.to_flags(self.status)
+
     def _add_creation_info(self, d):
         if self.labeler_login is not None:
             d[LABELER_LOGIN] = self.labeler_login

@@ -339,9 +351,9 @@
        #     "interior": []
        #   }
        # },
-       # "meta": {
-       #
-       #
+       # "meta": {"frame": 7},
+       # "nnCreated": false,
+       # "nnUpdated": false
        # }
        """
        data_json = {

@@ -349,6 +361,8 @@
             OBJECT_KEY: self.parent_object.key().hex,
             ApiField.GEOMETRY_TYPE: self.geometry.geometry_name(),
             ApiField.GEOMETRY: self.geometry.to_json(),
+            ApiField.NN_CREATED: self._nn_created,
+            ApiField.NN_UPDATED: self._nn_updated,
         }

         if key_id_map is not None:

@@ -473,6 +487,10 @@
         smart_tool_input = data.get(ApiField.SMART_TOOL_INPUT, None)
         priority = data.get(ApiField.PRIORITY, None)

+        nn_created = data.get(ApiField.NN_CREATED, False)
+        nn_updated = data.get(ApiField.NN_UPDATED, False)
+        status = LabelingStatus.from_flags(nn_created, nn_updated)
+
         return cls(
             object,
             geometry,

@@ -485,6 +503,7 @@
             track_id=track_id,
             smart_tool_input=smart_tool_input,
             priority=priority,
+            status=status,
         )

     def clone(

@@ -500,6 +519,7 @@
         track_id: Optional[str] = None,
         smart_tool_input: Optional[Dict] = None,
         priority: Optional[int] = None,
+        status: Optional[LabelingStatus] = None,
     ) -> VideoFigure:
         """
         Makes a copy of VideoFigure with new fields, if fields are given, otherwise it will use fields of the original VideoFigure.

@@ -526,6 +546,8 @@
         :type smart_tool_input: dict, optional
         :param priority: Priority of the figure (position of the figure relative to other overlapping or underlying figures).
         :type priority: int, optional
+        :param status: Sets labeling status. Specifies if the VideoFigure was created by NN model, manually or created by NN and then manually corrected.
+        :type status: LabelingStatus, optional
         :return: VideoFigure object
         :rtype: :class:`VideoFigure`

@@ -582,8 +604,20 @@
             track_id=take_with_default(track_id, self.track_id),
             smart_tool_input=take_with_default(smart_tool_input, self._smart_tool_input),
             priority=take_with_default(priority, self._priority),
+            status=take_with_default(status, self.status),
         )

+    @property
+    def status(self) -> LabelingStatus:
+        """Labeling status. Specifies if the VideoFigure was created by NN model, manually or created by NN and then manually corrected."""
+        return self._status
+
+    @status.setter
+    def status(self, status: LabelingStatus):
+        """Set labeling status."""
+        self._status = status
+        self._nn_created, self._nn_updated = LabelingStatus.to_flags(self.status)
+
     def validate_bounds(
         self, img_size: Tuple[int, int], _auto_correct: Optional[bool] = False
     ) -> None:
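The new status field on VideoFigure wraps the nnCreated/nnUpdated JSON flags in a LabelingStatus value, defaulting to LabelingStatus.MANUAL. A short usage sketch, assuming video_object, geometry and frame_index are an existing VideoObject, Geometry and frame number; LabelingStatus member names other than MANUAL are not shown in this diff and are therefore not used here:

from supervisely.annotation.label import LabelingStatus
from supervisely.video_annotation.video_figure import VideoFigure

# Without an explicit status the figure defaults to LabelingStatus.MANUAL,
# which LabelingStatus.to_flags() maps to nnCreated=false / nnUpdated=false.
figure = VideoFigure(video_object, geometry, frame_index)
print(figure.status)

# to_json() now serializes both flags (key names as shown in the docstring above).
data = figure.to_json()
print(data["nnCreated"], data["nnUpdated"])

# Reassigning figure.status recomputes the flags via the property setter.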
supervisely/worker_api/agent_rpc.py
CHANGED

@@ -1,10 +1,12 @@
 # coding: utf-8
+# isort: skip_file

 import cv2
 import numpy as np

 from .chunking import load_to_memory_chunked_image, load_to_memory_chunked
-
+
+# from ..worker_proto import worker_api_pb2 as api_proto  # Import moved to methods where needed


 class SimpleCache:

@@ -22,6 +24,13 @@ class SimpleCache:


 def download_image_from_remote(agent_api, image_hash, src_node_token, logger):
+    try:
+        from ..worker_proto import worker_api_pb2 as api_proto
+    except Exception as e:
+        from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+        raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
     resp = agent_api.get_stream_with_data(
         'DownloadImages',
         api_proto.ChunkImage,

@@ -34,6 +43,13 @@ def download_image_from_remote(agent_api, image_hash, src_node_token, logger):


 def download_data_from_remote(agent_api, req_id, logger):
+    try:
+        from ..worker_proto import worker_api_pb2 as api_proto
+    except Exception as e:
+        from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+        raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
     resp = agent_api.get_stream_with_data('GetGeneralEventData', api_proto.Chunk, api_proto.Empty(),
                                           addit_headers={'x-request-id': req_id})
     b_data = load_to_memory_chunked(resp)

@@ -47,6 +63,13 @@ def batched(seq, batch_size):


 def send_from_memory_generator(out_bytes, chunk_size):
+    try:
+        from ..worker_proto import worker_api_pb2 as api_proto
+    except Exception as e:
+        from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+        raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
     for bytes_chunk in batched(out_bytes, chunk_size):
         yield api_proto.Chunk(buffer=bytes_chunk, total_size=len(out_bytes))

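Both worker_api modules in this release replace the module-level worker_api_pb2 import with per-function imports guarded by PROTOBUF_REQUIRED_ERROR, so the package stays importable when the protobuf extra is absent. The same deferred-import pattern in generic form; the _require_proto helper name and error text below are illustrative, not the SDK's:

def _require_proto():
    # Import the generated protobuf module only when an RPC feature is used,
    # turning a missing optional dependency into an actionable error.
    try:
        from supervisely.worker_proto import worker_api_pb2 as api_proto
    except Exception as e:
        raise ImportError(
            "protobuf support is required for agent RPC features; "
            "install the corresponding extra to use this function"
        ) from e
    return api_proto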
supervisely/worker_api/rpc_servicer.py
CHANGED

@@ -1,4 +1,5 @@
 # coding: utf-8
+# isort: skip_file

 import os
 import concurrent.futures

@@ -11,18 +12,27 @@ import threading
 from supervisely.annotation.annotation import Annotation
 from supervisely.function_wrapper import function_wrapper, function_wrapper_nofail
 from supervisely.imaging.image import drop_image_alpha_channel
-from supervisely.nn.legacy.hosted.inference_modes import
-
+from supervisely.nn.legacy.hosted.inference_modes import (
+    InferenceModeFactory,
+    InfModeFullImage,
+    MODE,
+    NAME,
+    get_effective_inference_mode_config,
+)
 from supervisely.project.project_meta import ProjectMeta
 from supervisely.worker_api.agent_api import AgentAPI
-from supervisely.worker_api.agent_rpc import
-
+from supervisely.worker_api.agent_rpc import (
+    decode_image,
+    download_image_from_remote,
+    download_data_from_remote,
+    send_from_memory_generator,
+)
 from supervisely.worker_api.interfaces import SingleImageInferenceInterface
-
+
+# from supervisely.worker_proto import worker_api_pb2 as api_proto  # Import moved to methods where needed
 from supervisely.task.progress import report_agent_rpc_ready
 from supervisely.api.api import Api

-
 REQUEST_TYPE = 'request_type'
 GET_OUT_META = 'get_out_meta'
 INFERENCE = 'inference'

@@ -123,6 +133,13 @@ class AgentRPCServicerBase:
         self.thread_pool.submit(function_wrapper_nofail, self._send_data, res_msg, req_id)  # skip errors

     def _send_data(self, out_msg, req_id):
+        try:
+            from supervisely.worker_proto import worker_api_pb2 as api_proto
+        except Exception as e:
+            from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+            raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
         self.logger.trace('Will send output data.', extra={REQUEST_ID: req_id})
         out_bytes = json.dumps(out_msg).encode('utf-8')

@@ -173,6 +190,13 @@ class AgentRPCServicerBase:
         self._load_data_if_required(event_obj)

     def run_inf_loop(self):
+        try:
+            from supervisely.worker_proto import worker_api_pb2 as api_proto
+        except Exception as e:
+            from supervisely.app.v1.constants import PROTOBUF_REQUIRED_ERROR
+
+            raise ImportError(PROTOBUF_REQUIRED_ERROR) from e
+
         def seq_inf_wrapped():
             function_wrapper(self._sequential_final_processing)  # exit if raised

@@ -252,4 +276,4 @@ class InactiveRPCServicer(AgentRPCServicer):
         self.logger.info('Created InactiveRPCServicer for internal usage', extra=conn_config)

     def run_inf_loop(self):
-        raise RuntimeError("Method is not accessible")
+        raise RuntimeError("Method is not accessible")