supervisely 6.73.463__py3-none-any.whl → 6.73.464__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- supervisely/nn/inference/inference.py +256 -0
- {supervisely-6.73.463.dist-info → supervisely-6.73.464.dist-info}/METADATA +1 -1
- {supervisely-6.73.463.dist-info → supervisely-6.73.464.dist-info}/RECORD +7 -7
- {supervisely-6.73.463.dist-info → supervisely-6.73.464.dist-info}/LICENSE +0 -0
- {supervisely-6.73.463.dist-info → supervisely-6.73.464.dist-info}/WHEEL +0 -0
- {supervisely-6.73.463.dist-info → supervisely-6.73.464.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.463.dist-info → supervisely-6.73.464.dist-info}/top_level.txt +0 -0
supervisely/nn/inference/inference.py

@@ -11,6 +11,7 @@ import subprocess
 import tempfile
 import threading
 import time
+import uuid
 from collections import OrderedDict, defaultdict
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import asdict, dataclass
@@ -52,6 +53,7 @@ from supervisely.annotation.tag_meta import TagMeta, TagValueType
 from supervisely.api.api import Api, ApiField
 from supervisely.api.app_api import WorkflowMeta, WorkflowSettings
 from supervisely.api.image_api import ImageInfo
+from supervisely.api.video.video_api import VideoInfo
 from supervisely.app.content import get_data_dir
 from supervisely.app.fastapi.subapp import (
     Application,
@@ -102,6 +104,11 @@ from supervisely.video_annotation.video_figure import VideoFigure
 from supervisely.video_annotation.video_object import VideoObject
 from supervisely.video_annotation.video_object_collection import VideoObjectCollection
 from supervisely.video_annotation.video_tag_collection import VideoTagCollection
+from supervisely.video_annotation.key_id_map import KeyIdMap
+from supervisely.video_annotation.video_object_collection import (
+    VideoObject,
+    VideoObjectCollection,
+)

 try:
     from typing import Literal
@@ -2289,6 +2296,162 @@ class Inference:
         inference_request.final_result = {"video_ann": video_ann_json}
         return video_ann_json

+    def _tracking_by_detection(self, api: Api, state: dict, inference_request: InferenceRequest):
+        logger.debug("Inferring video_id...", extra={"state": state})
+        inference_settings = self._get_inference_settings(state)
+        logger.debug(f"Inference settings:", extra=inference_settings)
+        batch_size = self._get_batch_size_from_state(state)
+        video_id = get_value_for_keys(state, ["videoId", "video_id"], ignore_none=True)
+        if video_id is None:
+            raise ValueError("Video id is not provided")
+        video_info = api.video.get_info_by_id(video_id)
+        start_frame_index = get_value_for_keys(
+            state, ["startFrameIndex", "start_frame_index", "start_frame"], ignore_none=True
+        )
+        if start_frame_index is None:
+            start_frame_index = 0
+        step = get_value_for_keys(state, ["stride", "step"], ignore_none=True)
+        if step is None:
+            step = 1
+        end_frame_index = get_value_for_keys(
+            state, ["endFrameIndex", "end_frame_index", "end_frame"], ignore_none=True
+        )
+        duration = state.get("duration", None)
+        frames_count = get_value_for_keys(
+            state, ["framesCount", "frames_count", "num_frames"], ignore_none=True
+        )
+        tracking = state.get("tracker", None)
+        direction = state.get("direction", "forward")
+        direction = 1 if direction == "forward" else -1
+        track_id = get_value_for_keys(state, ["trackId", "track_id"], ignore_none=True)
+
+        if frames_count is not None:
+            n_frames = frames_count
+        elif end_frame_index is not None:
+            n_frames = end_frame_index - start_frame_index
+        elif duration is not None:
+            fps = video_info.frames_count / video_info.duration
+            n_frames = int(duration * fps)
+        else:
+            n_frames = video_info.frames_count
+
+        inference_request.tracker = self._tracker_init(state.get("tracker", None), state.get("tracker_settings", {}))
+
+        logger.debug(
+            f"Video info:",
+            extra=dict(
+                w=video_info.frame_width,
+                h=video_info.frame_height,
+                start_frame_index=start_frame_index,
+                n_frames=n_frames,
+            ),
+        )
+
+        # start downloading video in background
+        self.cache.run_cache_task_manually(api, None, video_id=video_id)
+
+        progress_total = (n_frames + step - 1) // step
+        inference_request.set_stage(InferenceRequest.Stage.INFERENCE, 0, progress_total)
+
+        _upload_f = partial(
+            self.upload_predictions_to_video,
+            api=api,
+            video_info=video_info,
+            track_id=track_id,
+            context=inference_request.context,
+            progress_cb=inference_request.done,
+            inference_request=inference_request,
+        )
+
+        _range = (start_frame_index, start_frame_index + direction * n_frames)
+        if _range[0] > _range[1]:
+            _range = (_range[1], _range[0])
+
+        def _notify_f(predictions: List[Prediction]):
+            logger.debug(
+                "Notifying tracking progress...",
+                extra={
+                    "track_id": track_id,
+                    "range": _range,
+                    "current": inference_request.progress.current,
+                    "total": inference_request.progress.total,
+                },
+            )
+            stopped = self.api.video.notify_progress(
+                track_id=track_id,
+                video_id=video_info.id,
+                frame_start=_range[0],
+                frame_end=_range[1],
+                current=inference_request.progress.current,
+                total=inference_request.progress.total,
+            )
+            if stopped:
+                inference_request.stop()
+                logger.info("Tracking has been stopped by user", extra={"track_id": track_id})
+
+        def _exception_handler(e: Exception):
+            self.api.video.notify_tracking_error(
+                track_id=track_id,
+                error=str(type(e)),
+                message=str(e),
+            )
+            raise e
+
+        with Uploader(
+            upload_f=_upload_f,
+            notify_f=_notify_f,
+            exception_handler=_exception_handler,
+            logger=logger,
+        ) as uploader:
+            for batch in batched(
+                range(
+                    start_frame_index, start_frame_index + direction * n_frames, direction * step
+                ),
+                batch_size,
+            ):
+                if inference_request.is_stopped():
+                    logger.debug(
+                        f"Cancelling inference video...",
+                        extra={"inference_request_uuid": inference_request.uuid},
+                    )
+                    break
+                logger.debug(
+                    f"Inferring frames {batch[0]}-{batch[-1]}:",
+                )
+                frames = self.cache.download_frames(
+                    api, video_info.id, batch, redownload_video=True
+                )
+                anns, slides_data = self._inference_auto(
+                    source=frames,
+                    settings=inference_settings,
+                )
+
+                if inference_request.tracker is not None:
+                    anns = self._apply_tracker_to_anns(frames, anns, inference_request.tracker)
+
+                predictions = [
+                    Prediction(
+                        ann,
+                        model_meta=self.model_meta,
+                        frame_index=frame_index,
+                        video_id=video_info.id,
+                        dataset_id=video_info.dataset_id,
+                        project_id=video_info.project_id,
+                    )
+                    for ann, frame_index in zip(anns, batch)
+                ]
+                for pred, this_slides_data in zip(predictions, slides_data):
+                    pred.extra_data["slides_data"] = this_slides_data
+                uploader.put(predictions)
+        video_ann_json = None
+        if inference_request.tracker is not None:
+            inference_request.set_stage("Postprocess...", 0, 1)
+            video_ann_json = inference_request.tracker.video_annotation.to_json()
+            inference_request.done()
+        inference_request.final_result = {"video_ann": video_ann_json}
+        return video_ann_json
+
+
     def _inference_project_id(self, api: Api, state: dict, inference_request: InferenceRequest):
         """Inference project images.
         If "output_project_id" in state, upload images and annotations to the output project.
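For orientation, below is a minimal sketch of the kind of `state` payload that `_tracking_by_detection` reads. The key names, alternative spellings, and defaults come from the hunk above; the concrete values are invented for illustration.

```python
# Illustrative only: keys mirror what _tracking_by_detection looks up above,
# values are made up. Camel-case and snake_case variants are both accepted.
state = {
    "videoId": 123456,          # required; a missing id raises ValueError
    "startFrameIndex": 0,       # defaults to 0 when omitted
    "stride": 1,                # a.k.a. "step"; defaults to 1
    "framesCount": 300,         # alternatives: "endFrameIndex" or "duration";
                                # otherwise the whole video is processed
    "direction": "forward",     # any other value is treated as backward
    "trackId": "my-track-id",   # used for progress and error notifications
    "tracker": "botsort",       # forwarded to self._tracker_init(...)
    "tracker_settings": {},     # forwarded to self._tracker_init(...)
}
```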
@@ -2955,6 +3118,83 @@ class Inference:
         inference_request.add_results(results)
         inference_request.done(len(results))

+    def upload_predictions_to_video(
+        self,
+        predictions: List[Prediction],
+        api: Api,
+        video_info: VideoInfo,
+        track_id: str,
+        context: Dict,
+        progress_cb=None,
+        inference_request: InferenceRequest = None,
+    ):
+        key_id_map = KeyIdMap()
+        project_meta = context.get("project_meta", None)
+        if project_meta is None:
+            project_meta = ProjectMeta.from_json(api.project.get_meta(video_info.project_id))
+            context["project_meta"] = project_meta
+        meta_changed = False
+        for prediction in predictions:
+            project_meta, ann, meta_changed_ = update_meta_and_ann(
+                project_meta, prediction.annotation, None
+            )
+            prediction.annotation = ann
+            meta_changed = meta_changed or meta_changed_
+        if meta_changed:
+            project_meta = api.project.update_meta(video_info.project_id, project_meta)
+            context["project_meta"] = project_meta
+
+        figure_data_by_object_id = defaultdict(list)
+
+        tracks_to_object_ids = context.setdefault("tracks_to_object_ids", {})
+        new_tracks: Dict[int, VideoObject] = {}
+        for prediction in predictions:
+            annotation = prediction.annotation
+            tracks = annotation.custom_data
+            for track, label in zip(tracks, annotation.labels):
+                if track not in tracks_to_object_ids and track not in new_tracks:
+                    video_object = VideoObject(obj_class=label.obj_class)
+                    new_tracks[track] = video_object
+        if new_tracks:
+            tracks, video_objects = zip(*new_tracks.items())
+            added_object_ids = api.video.object.append_bulk(
+                video_info.id, VideoObjectCollection(video_objects), key_id_map=key_id_map
+            )
+            for track, object_id in zip(tracks, added_object_ids):
+                tracks_to_object_ids[track] = object_id
+        for prediction in predictions:
+            annotation = prediction.annotation
+            tracks = annotation.custom_data
+            for track, label in zip(tracks, annotation.labels):
+                object_id = tracks_to_object_ids[track]
+                figure_data_by_object_id[object_id].append(
+                    {
+                        ApiField.OBJECT_ID: object_id,
+                        ApiField.GEOMETRY_TYPE: label.geometry.geometry_name(),
+                        ApiField.GEOMETRY: label.geometry.to_json(),
+                        ApiField.META: {ApiField.FRAME: prediction.frame_index},
+                        ApiField.TRACK_ID: track_id,
+                    }
+                )
+
+        for object_id, figures_data in figure_data_by_object_id.items():
+            figures_keys = [uuid.uuid4() for _ in figures_data]
+            api.video.figure._append_bulk(
+                entity_id=video_info.id,
+                figures_json=figures_data,
+                figures_keys=figures_keys,
+                key_id_map=key_id_map,
+            )
+            logger.debug(f"Added {len(figures_data)} geometries to object #{object_id}")
+        if progress_cb:
+            progress_cb(len(predictions))
+        if inference_request is not None:
+            results = self._format_output(predictions)
+            for result in results:
+                result["annotation"] = None
+                result["data"] = None
+            inference_request.add_results(results)
+
     def serve(self):
         if not self._use_gui and not self._is_cli_deploy:
             Progress("Deploying model ...", 1)
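As a reading aid, each entry accumulated in `figure_data_by_object_id` above takes roughly the shape sketched below. The keys are the `ApiField` constants used in the hunk; the object id, frame index, and geometry (here a rectangle, whose exact JSON depends on the geometry class's `to_json()`) are invented examples.

```python
from supervisely.api.api import ApiField

# Rough shape of one per-label entry; values are illustrative only.
figure_json = {
    ApiField.OBJECT_ID: 987,
    ApiField.GEOMETRY_TYPE: "rectangle",
    ApiField.GEOMETRY: {"points": {"exterior": [[10, 10], [50, 60]], "interior": []}},
    ApiField.META: {ApiField.FRAME: 42},
    ApiField.TRACK_ID: "my-track-id",
}
```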
@@ -3352,6 +3592,22 @@ class Inference:
                 "inference_request_uuid": inference_request.uuid,
             }

+        @server.post("/tracking_by_detection")
+        def tracking_by_detection(response: Response, request: Request):
+            state = request.state.state
+            context = request.state.context
+            state.update(context)
+            if state.get("tracker") is None:
+                state["tracker"] = "botsort"
+
+            logger.debug("Received a request to 'tracking_by_detection'", extra={"state": state})
+            self.validate_inference_state(state)
+            api = self.api_from_request(request)
+            inference_request, future = self.inference_requests_manager.schedule_task(
+                self._tracking_by_detection, api, state
+            )
+            return {"message": "Track task started."}
+
         @server.post("/inference_project_id_async")
         def inference_project_id_async(response: Response, request: Request):
             state = request.state.state
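A hedged usage sketch for the new route: the path, the "botsort" fallback, and the response message come from the hunk above, while the server URL, the auth header, and the assumption that the JSON body's "state"/"context" fields populate `request.state.state` / `request.state.context` are assumptions about how a deployed serving app is typically reached and may differ in your setup.

```python
import requests

SERVER_URL = "http://localhost:8000"        # assumption: locally deployed serving app
API_TOKEN = "<your Supervisely API token>"  # assumption: auth wiring may differ

payload = {
    "state": {
        "videoId": 123456,
        "trackId": "my-track-id",
        "framesCount": 300,
        # "tracker" may be omitted; the endpoint falls back to "botsort"
    },
    "context": {},
}

resp = requests.post(
    f"{SERVER_URL}/tracking_by_detection",
    json=payload,
    headers={"x-api-key": API_TOKEN},       # assumption about how the token is passed
)
print(resp.json())  # expected: {"message": "Track task started."}
```

The task itself is scheduled through `inference_requests_manager.schedule_task`, so the response returns immediately while tracking runs in the background.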
{supervisely-6.73.463.dist-info → supervisely-6.73.464.dist-info}/RECORD

@@ -906,7 +906,7 @@ supervisely/nn/benchmark/visualization/widgets/table/__init__.py,sha256=47DEQpj8
 supervisely/nn/benchmark/visualization/widgets/table/table.py,sha256=atmDnF1Af6qLQBUjLhK18RMDKAYlxnsuVHMSEa5a-e8,4319
 supervisely/nn/inference/__init__.py,sha256=QFukX2ip-U7263aEPCF_UCFwj6EujbMnsgrXp5Bbt8I,1623
 supervisely/nn/inference/cache.py,sha256=Hkxvu70rrB-j7ztQ4TBOxQePAxiKS7Erdb2FmK7aetY,35795
-supervisely/nn/inference/inference.py,sha256=
+supervisely/nn/inference/inference.py,sha256=2Unz6m_AnzVWTKy1Jv9G6VYYLvzu1Mbdl17SKIPZkAA,230530
 supervisely/nn/inference/inference_request.py,sha256=yuqEL4BWjC-aKze_raGScEQyhHe8loYb_eNhGPsf2-4,14870
 supervisely/nn/inference/session.py,sha256=WRJKVnmh5GPXnwtuKJn7AO1C7Td39wZo774ZIVQJGYk,36228
 supervisely/nn/inference/uploader.py,sha256=Dn5MfMRq7tclEWpP0B9fJjTiQPBpwumfXxC8-lOYgnM,5659
@@ -1129,9 +1129,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=yRwzEQmVwSd6lUQoAUdBngKEOlnoQ6hA9ZcoZGJRNC4,331
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
+supervisely-6.73.464.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.464.dist-info/METADATA,sha256=LetWCHVdHoghuOfeeVXPlpSzx46mBvxCsBpAf1P1uYs,35604
+supervisely-6.73.464.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+supervisely-6.73.464.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.464.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.464.dist-info/RECORD,,