supervisely-6.73.357-py3-none-any.whl → supervisely-6.73.359-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- supervisely/_utils.py +12 -0
- supervisely/api/annotation_api.py +3 -0
- supervisely/api/api.py +2 -2
- supervisely/api/app_api.py +27 -2
- supervisely/api/entity_annotation/tag_api.py +0 -1
- supervisely/api/nn/__init__.py +0 -0
- supervisely/api/nn/deploy_api.py +821 -0
- supervisely/api/nn/neural_network_api.py +248 -0
- supervisely/api/task_api.py +26 -467
- supervisely/app/fastapi/subapp.py +1 -0
- supervisely/nn/__init__.py +2 -1
- supervisely/nn/artifacts/artifacts.py +5 -5
- supervisely/nn/benchmark/object_detection/metric_provider.py +3 -0
- supervisely/nn/experiments.py +28 -5
- supervisely/nn/inference/cache.py +178 -114
- supervisely/nn/inference/gui/gui.py +18 -35
- supervisely/nn/inference/gui/serving_gui.py +3 -1
- supervisely/nn/inference/inference.py +1421 -1265
- supervisely/nn/inference/inference_request.py +412 -0
- supervisely/nn/inference/object_detection_3d/object_detection_3d.py +31 -24
- supervisely/nn/inference/session.py +2 -2
- supervisely/nn/inference/tracking/base_tracking.py +45 -79
- supervisely/nn/inference/tracking/bbox_tracking.py +220 -155
- supervisely/nn/inference/tracking/mask_tracking.py +274 -250
- supervisely/nn/inference/tracking/tracker_interface.py +23 -0
- supervisely/nn/inference/uploader.py +164 -0
- supervisely/nn/model/__init__.py +0 -0
- supervisely/nn/model/model_api.py +259 -0
- supervisely/nn/model/prediction.py +311 -0
- supervisely/nn/model/prediction_session.py +632 -0
- supervisely/nn/tracking/__init__.py +1 -0
- supervisely/nn/tracking/boxmot.py +114 -0
- supervisely/nn/tracking/tracking.py +24 -0
- supervisely/nn/training/train_app.py +61 -19
- supervisely/nn/utils.py +43 -3
- supervisely/task/progress.py +12 -2
- supervisely/video/video.py +107 -1
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/METADATA +2 -1
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/RECORD +43 -32
- supervisely/api/neural_network_api.py +0 -202
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/LICENSE +0 -0
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/WHEEL +0 -0
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/top_level.txt +0 -0

**supervisely/nn/inference/cache.py**

```diff
@@ -1,12 +1,13 @@
 import json
 import shutil
+import tempfile
 import time
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from enum import Enum
 from logging import Logger
 from pathlib import Path
 from threading import Lock, Thread
-from typing import Any, Callable, Generator, List, Optional, Tuple, Union
+from typing import Any, BinaryIO, Callable, Generator, List, Optional, Tuple, Union
 
 import cv2
 import numpy as np
```
```diff
@@ -15,8 +16,10 @@ from cachetools import Cache, LRUCache, TTLCache
 from fastapi import BackgroundTasks, FastAPI, Form, Request, UploadFile
 
 import supervisely as sly
+import supervisely.io.env as env
 from supervisely._utils import batched
 from supervisely.io.fs import silent_remove
+from supervisely.video.video import VideoFrameReader
 
 
 class PersistentImageLRUCache(LRUCache):
```
```diff
@@ -139,16 +142,26 @@ class PersistentImageTTLCache(TTLCache):
         if rm_base_folder:
             shutil.rmtree(self._base_dir)
 
-    def save_image(self, key, image: np.ndarray) -> None:
+    def save_image(self, key, image: Union[np.ndarray, BinaryIO, bytes], ext=".png") -> None:
         if not self._base_dir.exists():
             self._base_dir.mkdir()
 
-
+        if ext is None or ext == "":
+            ext = ".png"
+
+        filepath = self._base_dir / Path(key).with_suffix(ext)
         self[key] = filepath
 
         if filepath.exists():
             sly.logger.debug(f"Rewrite image {str(filepath)}")
-
+        if isinstance(image, np.ndarray):
+            sly.image.write(str(filepath), image)
+        elif isinstance(image, bytes):
+            with open(filepath, "wb") as f:
+                f.write(image)
+        else:
+            with open(filepath, "wb") as f:
+                shutil.copyfileobj(image, f)
 
     def get_image_path(self, key: Any) -> Path:
         return self[key]
```
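
`save_image()` now accepts raw `bytes` and binary file objects in addition to `np.ndarray`, and builds the on-disk name from the new `ext` argument (defaulting to `.png`). A minimal sketch of the same three-way dispatch, using `cv2.imwrite` as a stand-in for `sly.image.write` and a plain directory instead of the cache object:

```python
import shutil
from pathlib import Path
from typing import BinaryIO, Union

import cv2
import numpy as np


def save_payload(base_dir: Path, key: str, image: Union[np.ndarray, bytes, BinaryIO],
                 ext: str = ".png") -> Path:
    filepath = base_dir / Path(key).with_suffix(ext or ".png")
    if isinstance(image, np.ndarray):
        cv2.imwrite(str(filepath), image)  # arrays go through an image encoder
    elif isinstance(image, bytes):
        filepath.write_bytes(image)  # already-encoded bytes are written verbatim
    else:
        with open(filepath, "wb") as f:
            shutil.copyfileobj(image, f)  # file-like objects are streamed to disk
    return filepath
```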
```diff
@@ -156,16 +169,25 @@ class PersistentImageTTLCache(TTLCache):
     def get_image(self, key: Any):
         return sly.image.read(str(self[key]))
 
-    def save_video(self,
-        ext =
-
-
-
-
-
+    def save_video(self, key: Any, source: Union[str, Path, BinaryIO]) -> None:
+        ext = ""
+        if isinstance(source, Path):
+            ext = source.suffix
+        elif isinstance(source, str):
+            ext = Path(source).suffix
+        video_path = self._base_dir / f"video_{key}{ext}"
+        self[key] = video_path
+
+        if isinstance(source, (str, Path)):
+            if str(source) != str(video_path):
+                shutil.move(source, str(video_path))
+        else:
+            with open(video_path, "wb") as f:
+                shutil.copyfileobj(source, f)
+        sly.logger.debug(f"Video #{key} saved to {video_path}", extra={"video_id": key})
 
-    def get_video_path(self,
-        return self[
+    def get_video_path(self, key: Any) -> Path:
+        return self[key]
 
     def save_project_meta(self, key, value):
         self[key] = value
```
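
The rewritten `save_video()` keeps the source file's extension when a path is given and streams file-like objects into the cache directory. A hypothetical usage sketch, assuming `cache` is an already-constructed `PersistentImageTTLCache`:

```python
from pathlib import Path

# A path is moved into the cache; its ".mp4" suffix is preserved.
cache.save_video(42, Path("/tmp/downloads/clip_42.mp4"))
print(cache.get_video_path(42))  # .../video_42.mp4

# A binary stream is copied instead; no extension can be inferred.
with open("/tmp/downloads/clip_43.mp4", "rb") as stream:
    cache.save_video(43, stream)  # stored as .../video_43
```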
```diff
@@ -177,35 +199,6 @@ class PersistentImageTTLCache(TTLCache):
             shutil.copyfile(str(self[name]), path)
 
 
-class VideoFrameReader:
-    def __init__(self, video_path: str, frame_indexes: List[int]):
-        self.video_path = video_path
-        self.frame_indexes = frame_indexes
-        self.cap = None
-        self.prev_idx = -1
-
-    def __enter__(self):
-        self.cap = cv2.VideoCapture(str(self.video_path))
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        if self.cap is not None:
-            self.cap.release()
-
-    def read_frames(self) -> Generator:
-        try:
-            for frame_index in self.frame_indexes:
-                if frame_index != self.prev_idx + 1:
-                    self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
-                ret, frame = self.cap.read()
-                if not ret:
-                    raise KeyError(f"Frame {frame_index} not found in video {self.video_path}")
-                yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-                self.prev_idx = frame_index
-        finally:
-            self.cap.release()
-
-
 class InferenceImageCache:
     class _LoadType(Enum):
         ImageId: str = "IMAGE"
```
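
`VideoFrameReader` is relocated rather than dropped: the new `from supervisely.video.video import VideoFrameReader` import above and the `supervisely/video/video.py +107 -1` entry in the file list show it moving into the video module, where it gains the `iterate_frames()`, `frames_count()`, `frame_size()` and `fps()` methods used later in this diff. A sketch of the relocated reader, with signatures inferred from the call sites in this diff:

```python
from supervisely.video.video import VideoFrameReader

# Seeking iteration over selected frames, as in _read_frames_from_cached_video_iter:
with VideoFrameReader("video_42.mp4", [0, 5, 6]) as reader:
    for frame in reader.iterate_frames():
        print(frame.shape)  # one RGB ndarray per requested index

# Plain iteration over every frame, as in the new add_video_to_cache fallback:
with VideoFrameReader("video_42.mp4") as reader:
    for frame_index, frame in enumerate(reader):
        pass  # e.g. push each frame into the per-frame image cache
```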
```diff
@@ -218,7 +211,7 @@ class InferenceImageCache:
         maxsize: int,
         ttl: int,
         is_persistent: bool = True,
-        base_folder: str =
+        base_folder: str = env.smart_cache_container_dir(),
         log_progress: bool = False,
     ) -> None:
         self.is_persistent = is_persistent
```
```diff
@@ -227,6 +220,7 @@ class InferenceImageCache:
         self._lock = Lock()
         self._load_queue = CacheOut(ttl=10 * 60)
         self.log_progress = log_progress
+        self._download_executor = ThreadPoolExecutor(max_workers=5)
 
         if is_persistent:
             self._data_dir = Path(base_folder)
```
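
This shared pool replaces the ad-hoc `Thread` and per-call `ThreadPoolExecutor` instances removed further down (old lines 624-625, 806 and 818), so background downloads become bounded fire-and-forget submissions. The pattern in miniature, with a hypothetical `fetch` function:

```python
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=5)  # one pool for the object's lifetime


def fetch(video_id: int) -> None:
    ...  # download and cache the video


# Instead of Thread(target=fetch, args=(42,)).start() on every request:
executor.submit(fetch, 42)  # queued; at most 5 downloads run concurrently
```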
```diff
@@ -297,7 +291,7 @@ class InferenceImageCache:
     def _read_frames_from_cached_video_iter(self, video_id, frame_indexes):
         video_path = self._cache.get_video_path(video_id)
         with VideoFrameReader(video_path, frame_indexes) as reader:
-            for frame in reader.
+            for frame in reader.iterate_frames():
                 yield frame
 
     def _read_frames_from_cached_video(
```
```diff
@@ -317,6 +311,33 @@ class InferenceImageCache:
         if frame is None:
             raise KeyError(f"Frame {frame_index} not found in video {video_id}")
 
+    def get_video_frames_count(self, key):
+        """
+        Returns number of frames in the video
+        """
+        if not isinstance(self._cache, PersistentImageTTLCache):
+            raise ValueError("Video frames count can be obtained only for persistent cache")
+        video_path = self._cache.get_video_path(key)
+        return VideoFrameReader(video_path).frames_count()
+
+    def get_video_frame_size(self, key):
+        """
+        Returns height and width of the video frame. (h, w)
+        """
+        if not isinstance(self._cache, PersistentImageTTLCache):
+            raise ValueError("Video frame size can be obtained only for persistent cache")
+        video_path = self._cache.get_video_path(key)
+        return VideoFrameReader(video_path).frame_size()
+
+    def get_video_fps(self, key):
+        """
+        Returns fps of the video
+        """
+        if not isinstance(self._cache, PersistentImageTTLCache):
+            raise ValueError("Video fps can be obtained only for persistent cache")
+        video_path = self._cache.get_video_path(key)
+        return VideoFrameReader(video_path).fps()
+
     def get_frames_from_cache(self, video_id: int, frame_indexes: List[int]) -> List[np.ndarray]:
         if isinstance(self._cache, PersistentImageTTLCache) and video_id in self._cache:
             return self._read_frames_from_cached_video(video_id, frame_indexes)
```
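
A short usage sketch for the new metadata getters; all three require the persistent backend and raise `ValueError` otherwise (`cache` is an assumed `InferenceImageCache` with `is_persistent=True` and video 42 already cached):

```python
video_id = 42  # an id previously added to the cache

n_frames = cache.get_video_frames_count(video_id)
height, width = cache.get_video_frame_size(video_id)  # returned as (h, w)
fps = cache.get_video_fps(video_id)
print(f"{n_frames} frames, {width}x{height} @ {fps} fps")
```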
```diff
@@ -371,25 +392,32 @@ class InferenceImageCache:
     ) -> List[np.ndarray]:
         return_images = kwargs.get("return_images", True)
         redownload_video = kwargs.get("redownload_video", False)
+        progress_cb = kwargs.get("progress_cb", None)
 
         if video_id in self._cache:
             try:
-
+                frames = self.get_frames_from_cache(video_id, frame_indexes)
+                if progress_cb is not None:
+                    progress_cb(len(frame_indexes))
+                if return_images:
+                    return frames
             except:
                 sly.logger.warning(
                     f"Frames {frame_indexes} not found in video {video_id}", exc_info=True
                 )
-
-
-
-
-
+                self._download_executor.submit(
+                    self.download_video,
+                    api,
+                    video_id,
+                    **{**kwargs, "return_images": False},
+                )
         elif redownload_video:
-
-
-
-
-
+            self._download_executor.submit(
+                self.download_video,
+                api,
+                video_id,
+                **{**kwargs, "return_images": False},
+            )
 
         def name_constuctor(frame_index: int):
             return self._frame_name(video_id, frame_index)
```
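
Note the `progress_cb` contract implied by the two kinds of call sites: the cache-hit path above invokes it with a frame count, while the per-item loop later in this diff invokes it with no arguments. A callback therefore needs an optional count parameter; a tqdm-backed example under that assumption:

```python
from tqdm import tqdm

frame_indexes = list(range(100))
pbar = tqdm(total=len(frame_indexes), desc="Loading frames")


def progress_cb(count: int = 1) -> None:
    # Handles both progress_cb() and progress_cb(len(frame_indexes)).
    pbar.update(count)
```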
```diff
@@ -403,34 +431,56 @@ class InferenceImageCache:
             load_generator,
             api.logger,
             return_images,
+            progress_cb=progress_cb,
+            video_id=video_id,
         )
 
-    def
+    def add_video_to_cache_by_io(self, video_id: int, video_io: BinaryIO) -> None:
+        if isinstance(self._cache, PersistentImageTTLCache):
+            with self._lock:
+                self._cache.save_video(video_id, source=video_io)
+
+    def add_video_to_cache(self, video_id: int, source: Union[str, Path, BinaryIO]) -> None:
         """
         Adds video to cache.
         """
         if isinstance(self._cache, PersistentImageTTLCache):
             with self._lock:
-                self._cache.save_video(video_id,
+                self._cache.save_video(video_id, source)
             self._load_queue.delete(video_id)
             sly.logger.debug(f"Video #{video_id} added to cache", extra={"video_id": video_id})
         else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            tmp_source = None
+            if not isinstance(source, (str, Path)):
+                with tempfile.NamedTemporaryFile(delete=False) as f:
+                    shutil.copyfileobj(source, f)
+                    tmp_source = f.name
+                source = tmp_source
+            try:
+                with VideoFrameReader(source) as reader:
+                    for frame_index, frame in enumerate(reader):
+                        self.add_frame_to_cache(frame, video_id, frame_index)
+            finally:
+                if tmp_source is not None:
+                    silent_remove(tmp_source)
+
+    def add_image_to_cache(
+        self, key: str, image: Union[np.ndarray, BinaryIO, bytes], ext=None
+    ) -> np.ndarray:
+        """
+        Adds image to cache.
+        """
+        with self._lock:
+            self._cache.save_image(key, image, ext)
+        self._load_queue.delete(key)
+        sly.logger.debug(f"Image {key} added to cache", extra={"image_id": key})
+        return self._cache.get_image(key)
+
+    def get_image_path(self, key) -> str:
+        return str(self._cache.get_image_path(key))
+
+    def get_video_path(self, key) -> str:
+        return str(self._cache.get_video_path(key))
 
     def download_video(self, api: sly.Api, video_id: int, **kwargs) -> None:
         """
```
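
Usage of the new public helpers, assuming an already-constructed `InferenceImageCache`; with a persistent backend the video file is stored whole, otherwise the fallback decodes it into per-frame entries:

```python
from pathlib import Path

cache.add_video_to_cache(42, Path("/tmp/clip_42.mp4"))  # file is moved into the cache

with open("/tmp/clip_43.mp4", "rb") as f:
    cache.add_video_to_cache_by_io(43, f)  # stream variant, persistent backend only

with open("/tmp/photo.jpg", "rb") as f:
    img = cache.add_image_to_cache("photo_1", f, ext=".jpg")  # returns the decoded ndarray

print(cache.get_image_path("photo_1"), cache.get_video_path(42))
```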
```diff
@@ -621,8 +671,7 @@ class InferenceImageCache:
         state["video_id"] = video_id
         state["frame_ranges"] = list_of_ids_ranges_or_hashes
 
-
-        thread.start()
+        self._download_executor.submit(self.cache_task, api=api, state=state)
 
     def set_project_meta(self, project_id, project_meta):
         pr_meta_name = self._project_meta_name(project_id)
```
```diff
@@ -725,49 +774,65 @@ class InferenceImageCache:
         ],
         logger: Logger,
         return_images: bool = True,
+        progress_cb=None,
+        video_id=None,
     ) -> Optional[List[np.ndarray]]:
-        indexes_to_load = []
         pos_by_name = {}
         all_frames = [None for _ in range(len(indexes))]
-        items = []
-
-        for pos, hash_or_id in enumerate(indexes):
-            name = name_cunstructor(hash_or_id)
-            self._wait_if_in_queue(name, logger)
-
-            if name not in self._cache:
-                self._load_queue.set(name, hash_or_id)
-                indexes_to_load.append(hash_or_id)
-                pos_by_name[name] = pos
-            elif return_images is True:
-                items.append((pos, name))
 
         def get_one_image(item):
-            pos,
-
-
-
-
-
-
-
-
-
-            for
-            name = name_cunstructor(
-            self.
+            pos, index = item
+            if video_id in self._cache:
+                return pos, self.get_frame_from_cache(video_id, index)
+            return pos, self._cache.get_image(name_cunstructor(index))
+
+        position = 0
+        batch_size = 4
+        for batch in batched(indexes, batch_size):
+            indexes_to_load = []
+            items = []
+            for hash_or_id in batch:
+                name = name_cunstructor(hash_or_id)
+                self._wait_if_in_queue(name, logger)
+
+                if name not in self._cache and video_id not in self._cache:
+                    self._load_queue.set(name, hash_or_id)
+                    indexes_to_load.append(hash_or_id)
+                    pos_by_name[name] = position
+                elif return_images is True:
+                    items.append((position, hash_or_id))
+                position += 1
+
+            if len(items) > 0:
+                with ThreadPoolExecutor(min(64, len(items))) as executor:
+                    for pos, image in executor.map(get_one_image, items):
+                        all_frames[pos] = image
+                        if progress_cb is not None:
+                            progress_cb()
 
-
-
-
-
-
-
-
-
-
-
-
+            download_time = time.monotonic()
+            if len(indexes_to_load) > 0:
+                for id_or_hash, image in load_generator(indexes_to_load):
+                    name = name_cunstructor(id_or_hash)
+                    self._add_to_cache(name, image)
+
+                    if return_images:
+                        pos = pos_by_name[name]
+                        all_frames[pos] = image
+                    if progress_cb is not None:
+                        progress_cb()
+            download_time = time.monotonic() - download_time
+
+            # logger.debug(f"All stored files: {sorted(os.listdir(self.tmp_path))}")
+            if indexes_to_load:
+                indexes_to_load = list(indexes_to_load)
+                logger.debug(
+                    f"Images/Frames added to cache: {indexes_to_load} in {download_time:.2f} sec",
+                    extra={"indexes": indexes_to_load, "download_time": download_time},
+                )
+            found = set(batch).difference(indexes_to_load)
+            if found:
+                logger.debug(f"Images/Frames found in cache: {list(found)}")
 
         if return_images:
             return all_frames
```
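
The single pass over `indexes` becomes a batched pipeline: for each group of four ids, cache hits are read concurrently via `executor.map` while misses are fetched once through `load_generator`, and a global `position` counter keeps results in their original slots. The position bookkeeping in miniature, with a stand-in reader instead of the cache:

```python
from concurrent.futures import ThreadPoolExecutor

from supervisely._utils import batched

ids = list(range(9))
all_frames = [None] * len(ids)


def get_one_image(item):
    pos, idx = item
    return pos, f"image-{idx}"  # stand-in for a cache read


position = 0
for batch in batched(ids, 4):
    items = [(position + i, idx) for i, idx in enumerate(batch)]
    position += len(batch)
    with ThreadPoolExecutor(min(64, len(items))) as executor:
        for pos, image in executor.map(get_one_image, items):
            all_frames[pos] = image  # each result lands at its original position
```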
```diff
@@ -803,7 +868,7 @@ class InferenceImageCache:
         frame_index_to_path = {}
         for frame_index, path in zip(this_frame_indexes[:5], this_paths[:5]):
             frame_index_to_path[frame_index] = path
-            futures.append(
+            futures.append(self._download_executor.submit(_download_frame, frame_index))
         for future in as_completed(futures):
             frame_index, name = future.result()
             path = frame_index_to_path[frame_index]
```
```diff
@@ -815,5 +880,4 @@ class InferenceImageCache:
 
         # optimization for frame read from video file
         frame_indexes, paths = zip(*sorted(zip(frame_indexes, paths), key=lambda x: x[0]))
-        executor = ThreadPoolExecutor(max_workers=5)
         _download_and_save(frame_indexes, paths)
```
**supervisely/nn/inference/gui/gui.py**

```diff
@@ -5,9 +5,10 @@
 from functools import wraps
 from typing import Callable, Dict, List, Optional, Union
 
+import yaml
+
 import supervisely.app.widgets as Widgets
 import supervisely.io.env as env
-import yaml
 from supervisely import Api
 from supervisely._utils import abs_url, is_debug_with_sly_net, is_development
 from supervisely.api.file_api import FileApi
```
```diff
@@ -96,9 +97,7 @@ class InferenceGUI(BaseInferenceGUI):
         self._serve_button = Widgets.Button("SERVE")
         self._success_label = Widgets.DoneLabel()
         self._success_label.hide()
-        self._download_progress = Widgets.Progress(
-            "Downloading model...", hide_on_finish=True
-        )
+        self._download_progress = Widgets.Progress("Downloading model...", hide_on_finish=True)
         self._download_progress.hide()
         self._change_model_button = Widgets.Button(
             "STOP AND CHOOSE ANOTHER MODEL", button_type="danger"
```
```diff
@@ -124,9 +123,7 @@ class InferenceGUI(BaseInferenceGUI):
         self._model_classes_widget = Widgets.ClassesTable(selectable=False)
         self._model_classes_plug = Widgets.Text("No classes provided")
         self._model_classes_widget_container = Widgets.Field(
-            content=Widgets.Container(
-                [self._model_classes_widget, self._model_classes_plug]
-            ),
+            content=Widgets.Container([self._model_classes_widget, self._model_classes_plug]),
             title="Model classes",
             description="List of classes model predicts",
         )
```
```diff
@@ -155,9 +152,7 @@ class InferenceGUI(BaseInferenceGUI):
 
         self._model_full_info_card.collapse()
         self._additional_ui_content = []
-        self.get_ui = self.__add_content_and_model_info_to_default_ui(
-            self._model_full_info_card
-        )
+        self.get_ui = self.__add_content_and_model_info_to_default_ui(self._model_full_info_card)
 
         tabs_titles = []
         tabs_contents = []
```
```diff
@@ -171,9 +166,7 @@ class InferenceGUI(BaseInferenceGUI):
         def update_table(selected_model):
             cols = [
                 model_key
-                for model_key in self._models[selected_model]["checkpoints"][
-                    0
-                ].keys()
+                for model_key in self._models[selected_model]["checkpoints"][0].keys()
             ]
             rows = [
                 [value for param_name, value in model.items()]
```
```diff
@@ -262,9 +255,7 @@ class InferenceGUI(BaseInferenceGUI):
             custom_tab_content = Widgets.Container(custom_tab_widgets)
             tabs_titles.append("Custom models")
             tabs_contents.append(custom_tab_content)
-            tabs_descriptions.append(
-                "Models trained in Supervisely and located in Team Files"
-            )
+            tabs_descriptions.append("Models trained in Supervisely and located in Team Files")
 
         self._tabs = Widgets.RadioTabs(
             titles=tabs_titles,
```
```diff
@@ -272,9 +263,7 @@ class InferenceGUI(BaseInferenceGUI):
             descriptions=tabs_descriptions,
         )
 
-        self.on_change_model_callbacks: List[CallbackT] = [
-            InferenceGUI._hide_info_after_change
-        ]
+        self.on_change_model_callbacks: List[CallbackT] = [InferenceGUI._hide_info_after_change]
        self.on_serve_callbacks: List[CallbackT] = []
 
         @self.serve_button.click
```
```diff
@@ -302,7 +291,9 @@ class InferenceGUI(BaseInferenceGUI):
             self._models_table.enable()
         if self._support_custom_models:
             self._model_path_input.enable()
-
+        # @TODO: Ask web team to add message to list of request ready messages
+        # Progress("model deployment canceled", 1).iter_done_report()
+        Progress("Application is started ...", 1).iter_done_report()
 
     def _hide_info_after_change(self):
         self._model_full_info_card.collapse()
```
```diff
@@ -345,9 +336,7 @@ class InferenceGUI(BaseInferenceGUI):
 
         table_subtitles, cols = self._get_table_subtitles(cols)
         if self._models_table is None:
-            self._models_table = Widgets.RadioTable(
-                cols, rows, subtitles=table_subtitles
-            )
+            self._models_table = Widgets.RadioTable(cols, rows, subtitles=table_subtitles)
         else:
             self._models_table.set_data(cols, rows, subtitles=table_subtitles)
 
```
```diff
@@ -441,7 +430,7 @@ class InferenceGUI(BaseInferenceGUI):
 
     def set_project_meta(self, inference):
         if self._get_classes_from_inference(inference) is None:
-            logger.
+            logger.warning("Skip loading project meta.")
             self._model_classes_widget.hide()
             self._model_classes_plug.show()
             return
```
```diff
@@ -476,28 +465,22 @@ class InferenceGUI(BaseInferenceGUI):
             # self.show_deployed_model_info(inference)
 
         # else:
-        #     logger.
+        #     logger.warning("Failed to create handler for models table")
 
     def _get_classes_from_inference(self, inference) -> Optional[List[str]]:
         classes = None
         try:
             classes = inference.get_classes()
         except NotImplementedError:
-            logger.
-                f"get_classes() function not implemented for {type(inference)} object."
-            )
+            logger.warning(f"get_classes() function not implemented for {type(inference)} object.")
         except AttributeError:
-            logger.
-                "Probably, get_classes() function not working without model deploy."
-            )
+            logger.warning("Probably, get_classes() function not working without model deploy.")
         except Exception as exc:
-            logger.
+            logger.warning("Skip getting classes info due to exception")
             logger.exception(exc)
 
         if classes is None or len(classes) == 0:
-            logger.
-                f"get_classes() function return {classes}; skip classes processing."
-            )
+            logger.warning(f"get_classes() function return {classes}; skip classes processing.")
             return None
         return classes
 
```
**supervisely/nn/inference/gui/serving_gui.py**

```diff
@@ -129,7 +129,9 @@ class ServingGUI:
         self._device_select._select.enable()
         self._device_select.enable()
         self._change_model_button.hide()
-
+        # @TODO: Ask web team to add message to list of request ready messages
+        # Progress("model deployment canceled", 1).iter_done_report()
+        Progress("Application is started ...", 1).iter_done_report()
 
     def _hide_info_after_change(self):
         self._model_full_info_card.collapse()
```
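
Both GUI classes now emit the same readiness signal when a model change is canceled: a one-step `Progress` that is completed immediately, with the `@TODO` noting this is a stand-in until the platform accepts a dedicated "deployment canceled" message. The idiom, using the public `sly.Progress` alias:

```python
import supervisely as sly

# A one-step progress used purely as a "task is ready" signal for the platform.
progress = sly.Progress("Application is started ...", 1)
progress.iter_done_report()  # reports the single step as done
```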