supervisely-6.73.357-py3-none-any.whl → supervisely-6.73.359-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/_utils.py +12 -0
- supervisely/api/annotation_api.py +3 -0
- supervisely/api/api.py +2 -2
- supervisely/api/app_api.py +27 -2
- supervisely/api/entity_annotation/tag_api.py +0 -1
- supervisely/api/nn/__init__.py +0 -0
- supervisely/api/nn/deploy_api.py +821 -0
- supervisely/api/nn/neural_network_api.py +248 -0
- supervisely/api/task_api.py +26 -467
- supervisely/app/fastapi/subapp.py +1 -0
- supervisely/nn/__init__.py +2 -1
- supervisely/nn/artifacts/artifacts.py +5 -5
- supervisely/nn/benchmark/object_detection/metric_provider.py +3 -0
- supervisely/nn/experiments.py +28 -5
- supervisely/nn/inference/cache.py +178 -114
- supervisely/nn/inference/gui/gui.py +18 -35
- supervisely/nn/inference/gui/serving_gui.py +3 -1
- supervisely/nn/inference/inference.py +1421 -1265
- supervisely/nn/inference/inference_request.py +412 -0
- supervisely/nn/inference/object_detection_3d/object_detection_3d.py +31 -24
- supervisely/nn/inference/session.py +2 -2
- supervisely/nn/inference/tracking/base_tracking.py +45 -79
- supervisely/nn/inference/tracking/bbox_tracking.py +220 -155
- supervisely/nn/inference/tracking/mask_tracking.py +274 -250
- supervisely/nn/inference/tracking/tracker_interface.py +23 -0
- supervisely/nn/inference/uploader.py +164 -0
- supervisely/nn/model/__init__.py +0 -0
- supervisely/nn/model/model_api.py +259 -0
- supervisely/nn/model/prediction.py +311 -0
- supervisely/nn/model/prediction_session.py +632 -0
- supervisely/nn/tracking/__init__.py +1 -0
- supervisely/nn/tracking/boxmot.py +114 -0
- supervisely/nn/tracking/tracking.py +24 -0
- supervisely/nn/training/train_app.py +61 -19
- supervisely/nn/utils.py +43 -3
- supervisely/task/progress.py +12 -2
- supervisely/video/video.py +107 -1
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/METADATA +2 -1
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/RECORD +43 -32
- supervisely/api/neural_network_api.py +0 -202
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/LICENSE +0 -0
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/WHEEL +0 -0
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/top_level.txt +0 -0
supervisely/nn/tracking/boxmot.py
ADDED
@@ -0,0 +1,114 @@
+from typing import Iterable, List, Optional, Union
+
+import numpy as np
+
+from supervisely.annotation.annotation import Annotation
+from supervisely.geometry.rectangle import Rectangle
+from supervisely.nn.model.model_api import Prediction
+from supervisely.video_annotation.frame import Frame
+from supervisely.video_annotation.frame_collection import FrameCollection
+from supervisely.video_annotation.video_annotation import VideoAnnotation
+from supervisely.video_annotation.video_figure import VideoFigure
+from supervisely.video_annotation.video_object import VideoObject
+from supervisely.video_annotation.video_object_collection import VideoObjectCollection
+
+
+def _none_generator():
+    while True:
+        yield None
+
+
+def apply_boxmot(
+    tracker,
+    predictions: Union[List[Prediction], List[Annotation]],
+    class_names: List[str],
+    frames: Optional[Iterable[np.ndarray]] = None,
+) -> VideoAnnotation:
+    if frames is None:
+        frames = _none_generator()
+    results = []
+    annotations = []
+    frames_count = 0
+    for prediction, frame in zip(predictions, frames):
+        frames_count += 1
+        if isinstance(prediction, Prediction):
+            annotation = prediction.annotation
+            if frame is None:
+                frame = prediction.load_image()
+        else:
+            annotation = prediction
+        frame_shape = frame.shape[:2]
+        annotations.append(annotation)
+        detections = to_boxes(annotation, class_names)  # N x (x, y, x, y, conf, label)
+        tracks = tracker.update(
+            detections, frame
+        )  # M x (x, y, x, y, track_id, conf, label, det_id)
+        results.append(tracks)
+    return create_video_annotation(annotations, results, class_names, frame_shape, frames_count)
+
+
+def to_boxes(ann: Annotation, class_names: List[str]) -> np.ndarray:
+    """
+    Convert annotation to detections array in boxmot format.
+    :param ann: Supervisely Annotation object
+    :type ann: Annotation
+    :param class_names: model class names
+    :type class_names: List[str]
+    :return: detections array N x (x, y, x, y, conf, label)
+    :rtype: np.ndarray
+    """
+    # convert ann to N x (x, y, x, y, conf, cls) np.array
+    cls2label = {class_name: i for i, class_name in enumerate(class_names)}
+    detections = []
+    for label in ann.labels:
+        cat = cls2label[label.obj_class.name]
+        bbox = label.geometry.to_bbox()
+        conf = label.tags.get("confidence").value
+        detections.append([bbox.left, bbox.top, bbox.right, bbox.bottom, conf, cat])
+    detections = np.array(detections)
+    return detections
+
+
+def create_video_annotation(
+    annotations: List[Annotation],
+    tracking_results: list,
+    class_names: List[str],
+    frame_shape: tuple,
+    frames_count: int,
+) -> VideoAnnotation:
+    img_h, img_w = frame_shape
+    video_objects = {}  # track_id -> VideoObject
+    frames = []
+    cat2obj = {}
+    name2cat = {class_name: i for i, class_name in enumerate(class_names)}
+    obj_classes = {}
+    for annotation in annotations:
+        for label in annotation.labels:
+            obj_classes.setdefault(label.obj_class.name, label.obj_class)
+    for obj_name, cat in name2cat.items():
+        obj_class = obj_classes.get(obj_name)
+        if obj_class is None:
+            raise ValueError(f"Object class {obj_name} not found in annotations.")
+        cat2obj[cat] = obj_class
+    for i, tracks in enumerate(tracking_results):
+        frame_figures = []
+        for track in tracks:
+            # crop bbox to image size
+            dims = np.array([img_w, img_h, img_w, img_h]) - 1
+            track[:4] = np.clip(track[:4], 0, dims)
+            x1, y1, x2, y2, track_id, conf, cat = track[:7]
+            cat = int(cat)
+            track_id = int(track_id)
+            rect = Rectangle(y1, x1, y2, x2)
+            video_object = video_objects.setdefault(track_id, VideoObject(cat2obj[cat]))
+            frame_figures.append(VideoFigure(video_object, rect, i))
+        frames.append(Frame(i, frame_figures))
+
+    objects = list(video_objects.values())
+    video_annotation = VideoAnnotation(
+        img_size=frame_shape,
+        frames_count=frames_count,
+        objects=VideoObjectCollection(objects),
+        frames=FrameCollection(frames),
+    )
+    return video_annotation
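The new module feeds per-frame Supervisely annotations into a boxmot tracker and folds the resulting track ids into a single VideoAnnotation. A minimal usage sketch, assuming a boxmot tracker instance already constructed per boxmot's own documentation and a detector that produced one Annotation per frame; the variable names and video path are illustrative, not part of this diff:

# Hedged sketch: `tracker` is any boxmot tracker; `annotations` holds one
# supervisely Annotation per frame, with a "confidence" tag on each label.
from supervisely.nn.tracking.boxmot import apply_boxmot
from supervisely.video.video import VideoFrameReader  # also added in this release

class_names = ["person", "car"]  # model classes; the list index becomes the boxmot label

with VideoFrameReader("video.mp4") as frames:
    video_ann = apply_boxmot(tracker, annotations, class_names, frames=frames)
print(video_ann.frames_count, len(video_ann.objects))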
supervisely/nn/tracking/tracking.py
ADDED
@@ -0,0 +1,24 @@
+from typing import Any, Callable
+
+from supervisely.nn.model.model_api import ModelAPI
+from supervisely.nn.tracking.boxmot import apply_boxmot
+from supervisely.video_annotation.video_annotation import VideoAnnotation
+
+
+def _get_apply_fn(tracker: Any) -> Callable:
+    if tracker.__class__.__module__.startswith("boxmot"):
+        return apply_boxmot
+    else:
+        raise ValueError(
+            f"Tracker {tracker.__class__.__module__} is not supported. Please, use boxmot tracker."
+        )
+
+
+def track(video_id: int, tracker, detector: ModelAPI, **kwargs) -> VideoAnnotation:
+    apply_fn = _get_apply_fn(tracker)
+    if "classes" in kwargs:
+        classes = kwargs["classes"]
+    else:
+        classes = detector.get_classes()
+    predictions = detector.predict_detached(video_id=video_id, **kwargs)
+    return apply_fn(tracker, predictions, classes)
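track() dispatches on the tracker's module name, pulls class names from the detector unless classes is passed explicitly, and runs detection detached from the tracking loop. A hedged sketch of the intended call shape; obtaining the ModelAPI handle goes through the new deploy_api.py, which is not shown in this diff, so that step is hypothetical:

import supervisely as sly
from supervisely.nn.tracking import track  # assumes the new one-line __init__.py re-exports track

api = sly.Api()
detector = ...  # a ModelAPI handle to a deployed detection model (via the new deploy API)
# tracker: any boxmot tracker instance, as in the previous example
video_ann = track(video_id=42, tracker=tracker, detector=detector, classes=["person"])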
supervisely/nn/training/train_app.py
CHANGED
@@ -525,19 +525,19 @@ class TrainApp:
         if not success:
             raise ValueError(f"{reason}. Failed to upload artifacts")

-        # Step 3.
-
+        # Step 3. Create model meta according to model CV task type
+        model_meta = self.create_model_meta(experiment_info["task_type"])
+
+        # Step 4. Preprocess artifacts
+        experiment_info = self._preprocess_artifacts(experiment_info, model_meta)

-        # Step
+        # Step 5. Postprocess splits
         train_splits_data = self._postprocess_splits()

-        # Step
+        # Step 6. Upload artifacts
         self._set_text_status("uploading")
         remote_dir, file_info = self._upload_artifacts()

-        # Step 6. Create model meta according to model CV task type
-        model_meta = self.create_model_meta(experiment_info["task_type"])
-
         # Step 7. [Optional] Run Model Benchmark
         mb_eval_lnk_file_info, mb_eval_report = None, None
         mb_eval_report_id, eval_metrics = None, {}
@@ -579,8 +579,10 @@ class TrainApp:
         export_weights = {}
         if self.gui.hyperparameters_selector.is_export_required():
             try:
-                export_weights = self._export_weights(experiment_info)
-                export_weights = self._upload_export_weights(
+                export_weights, export_classes_path = self._export_weights(experiment_info)
+                export_weights = self._upload_export_weights(
+                    export_weights, export_classes_path, remote_dir
+                )
             except Exception as e:
                 logger.error(f"Export weights failed: {e}")

@@ -1345,7 +1347,7 @@ class TrainApp:
         }
         return splits_data

-    def _preprocess_artifacts(self, experiment_info: dict) -> None:
+    def _preprocess_artifacts(self, experiment_info: dict, model_meta: ProjectMeta) -> None:
         """
         Preprocesses and move the artifacts generated by the training process to output directories.

@@ -1397,12 +1399,42 @@ class TrainApp:

         new_checkpoint_paths = []
         best_checkpoints_name = experiment_info["best_checkpoint"]
+
+        # Prepare model files
+        try:
+            model_files = {}
+            for file in experiment_info["model_files"]:
+                with open(experiment_info["model_files"][file], "r") as f:
+                    model_files[file] = f.read()
+        except Exception as e:
+            logger.warning(f"Error loading model files: {e}")
+            model_files = {}
+
         for checkpoint_path in checkpoint_paths:
-
-
-                sly_fs.get_file_name_with_ext(checkpoint_path),
-            )
+            checkpoint_name = sly_fs.get_file_name_with_ext(checkpoint_path)
+            new_checkpoint_path = join(self._output_checkpoints_dir, checkpoint_name)
             shutil.move(checkpoint_path, new_checkpoint_path)
+            if len(model_files) > 0:
+                try:
+                    # pylint: disable=import-error
+                    import torch
+
+                    state_dict = torch.load(new_checkpoint_path)
+                    state_dict["model_info"] = {
+                        "model_name": experiment_info["model_name"],
+                        "framework": self.framework_name,
+                        "checkpoint": checkpoint_name,
+                        "experiment": self.gui.training_process.get_experiment_name(),
+                    }
+                    state_dict["model_meta"] = model_meta.to_json()
+                    state_dict["model_files"] = model_files
+                    torch.save(state_dict, new_checkpoint_path)
+                except Exception as e:
+                    logger.warning(
+                        f"Error writing info to checkpoint: '{checkpoint_name}'. Error:{e}"
+                    )
+                    continue
+
             new_checkpoint_paths.append(new_checkpoint_path)
             if sly_fs.get_file_name_with_ext(checkpoint_path) == best_checkpoints_name:
                 experiment_info["best_checkpoint"] = new_checkpoint_path
@@ -2543,17 +2575,27 @@ class TrainApp:
                 export_weights[RuntimeType.TENSORRT] = tensorrt_path
             except Exception as e:
                 logger.error(f"Failed to export TensorRT model: {e}")
-
-
+
+        export_classes_path = None
+        if len(export_weights) > 0:
+            export_classes = {idx: cls_name for idx, cls_name in enumerate(self.classes)}
+            export_dir = join(self.output_dir, self._export_dir_name)
+            sly_fs.mkdir(export_dir)
+            export_classes_path = join(export_dir, "classes.json")
+            sly_json.dump_json_file(export_classes, export_classes_path)
+        return export_weights, export_classes_path

     def _upload_export_weights(
-        self, export_weights: Dict[str, str], remote_dir: str
+        self, export_weights: Dict[str, str], export_classes_path: str, remote_dir: str
     ) -> Dict[str, str]:
         """Uploads export weights (any other specified formats) to Supervisely Team Files.
         The default export is handled by the `_upload_artifacts` method."""
         file_dest_paths = []
         size = 0
-
+        files_to_upload = list(export_weights.values())
+        if export_classes_path is not None:
+            files_to_upload.append(export_classes_path)
+        for path in files_to_upload:
             file_name = sly_fs.get_file_name_with_ext(path)
             file_dest_paths.append(join(remote_dir, self._export_dir_name, file_name))
             size += sly_fs.get_file_size(path)
@@ -2568,7 +2610,7 @@ class TrainApp:
             self.progress_bar_main.show()
             self._api.file.upload_bulk_fast(
                 team_id=self.team_id,
-                src_paths=
+                src_paths=files_to_upload,
                 dst_paths=file_dest_paths,
                 progress_cb=export_upload_main_pbar.update,
             )
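The pipeline was reordered so that model_meta exists before _preprocess_artifacts runs, because that method now embeds model_info, model_meta, and the raw model config files directly into each checkpoint. A sketch of what a downstream consumer could read back, assuming PyTorch and a checkpoint produced by this TrainApp; the file name is illustrative:

import torch

# The keys below are exactly those written by _preprocess_artifacts in this diff.
state_dict = torch.load("best.pt", map_location="cpu")
info = state_dict.get("model_info", {})      # model_name, framework, checkpoint, experiment
meta_json = state_dict.get("model_meta")     # ProjectMeta serialized with .to_json()
configs = state_dict.get("model_files", {})  # raw text of the framework config files
print(info.get("model_name"), "trained with", info.get("framework"))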
supervisely/nn/utils.py
CHANGED
@@ -1,8 +1,6 @@
 from dataclasses import dataclass
-from typing import List

-
-from supervisely.nn.experiments import ExperimentInfo
+import psutil


 class ModelSource:
@@ -61,6 +59,48 @@ def _get_model_name(model_info: dict):
     name = model_info.get("model_name")
     if not name:
         name = model_info.get("meta", {}).get("model_name")
+    if not name:
+        name = model_info.get("Model")
     if not name:
         raise ValueError("Model name not found not in model_info nor in meta.")
     return name
+
+
+def get_ram_usage():
+    memory = psutil.virtual_memory()
+    return memory.used, memory.total
+
+
+def get_gpu_usage(device: str = None):
+    if device == "cpu":
+        return None, None
+    try:
+        import torch
+    except Exception as e:
+        from supervisely import logger
+        logger.warning(f"Cannot import torch. Install PyTorch to get GPU usage info. Error: {e}")
+        return None, None
+    if not torch.cuda.is_available():
+        return None, None
+    gpu_index = None
+    if device is None or device in ["", "auto", "cuda"]:
+        gpu_index = torch.cuda.current_device()
+    elif isinstance(device, int):
+        gpu_index = device
+    elif device.startswith("cuda:"):
+        try:
+            gpu_index = int(device.split(":")[-1])
+        except ValueError:
+            return None, None
+    else:
+        for i in range(torch.cuda.device_count()):
+            if device == torch.cuda.get_device_name(i):
+                gpu_index = i
+                break
+        if gpu_index is None:
+            return None, None
+    if gpu_index is None or gpu_index > torch.cuda.device_count() or gpu_index < 0:
+        return None, None
+    allocated = torch.cuda.memory_allocated(gpu_index)
+    total = torch.cuda.get_device_properties(gpu_index).total_memory
+    return allocated, total
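Both new helpers return a (used, total) pair in bytes, falling back to (None, None) whenever the question cannot be answered (CPU device, missing torch, unknown GPU). A small formatting sketch:

from supervisely.nn.utils import get_gpu_usage, get_ram_usage

def fmt(used, total):
    if used is None:
        return "n/a"
    return f"{used / 1024**3:.1f} / {total / 1024**3:.1f} GiB"

print("RAM:", fmt(*get_ram_usage()))
print("GPU:", fmt(*get_gpu_usage("cuda:0")))  # (None, None) on CPU-only hosts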
supervisely/task/progress.py
CHANGED
@@ -79,6 +79,8 @@ class Progress:
         is_size: Optional[bool] = False,
         need_info_log: Optional[bool] = False,
         min_report_percent: Optional[int] = 1,
+        log_extra: Optional[Dict[str, str]] = None,
+        update_task_progress: Optional[bool] = True,
     ):
         self.is_size = is_size
         self.message = message
@@ -94,6 +96,8 @@ class Progress:
         self.logger = logger if ext_logger is None else ext_logger
         self.report_every = max(1, math.ceil((total_cnt or 0) / 100 * min_report_percent))
         self.need_info_log = need_info_log
+        self.log_extra = log_extra
+        self.update_task_progress = update_task_progress

         mb5 = 5 * 1024 * 1024
         if self.is_size and self.is_total_unknown:
@@ -169,9 +173,15 @@ class Progress:
         extra["current_label"] = self.current_label
         extra["total_label"] = self.total_label

-        self.
+        if self.log_extra:
+            extra.update(self.log_extra)
+
+        if self.update_task_progress:
+            self.logger.info("progress", extra=extra)
         if self.need_info_log is True:
-            self.logger.info(
+            self.logger.info(
+                f"{self.message} [{self.current_label} / {self.total_label}]", extra=self.log_extra
+            )

     def need_report(self) -> bool:
         if (
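The two new keyword arguments let callers attach custom fields to every progress log record and suppress the task-level "progress" event while keeping plain info logs. A hedged sketch using the SDK's usual Progress construction:

from supervisely.task.progress import Progress

progress = Progress(
    "Uploading artifacts",
    total_cnt=100,
    need_info_log=True,
    log_extra={"experiment": "exp-42"},  # merged into `extra` on every report
    update_task_progress=False,          # skip the task-wide "progress" log line
)
for _ in range(100):
    progress.iter_done_report()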
supervisely/video/video.py
CHANGED
@@ -4,7 +4,10 @@
 from __future__ import annotations

 import os
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, Generator, List, Optional, Tuple
+
+import cv2
+import numpy as np

 from supervisely import logger as default_logger
 from supervisely._utils import abs_url, is_development, rand_str
@@ -519,3 +522,106 @@ def get_labeling_tool_link(url: str, name: Optional[str] = "open in labeling too
     :rtype: str
     """
     return f'<a href="{url}" rel="noopener noreferrer" target="_blank">{name}<i class="zmdi zmdi-open-in-new" style="margin-left: 5px"></i></a>'
+
+
+class VideoFrameReader:
+    def __init__(self, video_path: str, frame_indexes: List[int] = None):
+        self.video_path = video_path
+        self.frame_indexes = frame_indexes
+        self.vr = None
+        self.cap = None
+        self.prev_idx = -1
+
+    def _ensure_initialized(self):
+        if self.vr is None and self.cap is None:
+            try:
+                import decord
+
+                self.vr = decord.VideoReader(str(self.video_path))
+            except ImportError:
+                default_logger.debug(
+                    "Decord is not installed. Falling back to OpenCV for video reading."
+                )
+                self.cap = cv2.VideoCapture(str(self.video_path))
+
+    def close(self):
+        if self.vr is not None:
+            self.vr = None
+        if self.cap is not None:
+            self.cap.release()
+            self.cap = None
+        self.prev_idx = -1
+
+    def __enter__(self):
+        self._ensure_initialized()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+
+    def __del__(self):
+        self.close()
+
+    def iterate_frames(self, frame_indexes: List[int] = None) -> Generator[np.ndarray, None, None]:
+        self._ensure_initialized()
+        if frame_indexes is None:
+            frame_indexes = self.frame_indexes
+        if self.vr is not None:
+            if frame_indexes is None:
+                frame_indexes = range(len(self.vr))
+            for frame_index in frame_indexes:
+                frame = self.vr[frame_index]
+                yield frame.asnumpy()
+        else:
+            if frame_indexes is None:
+                frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+                frame_indexes = range(frame_count)
+            for frame_index in frame_indexes:
+                if 1 > frame_index - self.prev_idx < 20:
+                    while self.prev_idx < frame_index - 1:
+                        self.cap.read()
+                if frame_index != self.prev_idx + 1:
+                    self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
+                ret, frame = self.cap.read()
+                if not ret:
+                    raise KeyError(f"Frame {frame_index} not found in video {self.video_path}")
+                yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                self.prev_idx = frame_index
+
+    def read_frames(self, frame_indexes: List[int] = None) -> List[np.ndarray]:
+        return list(self.iterate_frames(frame_indexes))
+
+    def __iter__(self):
+        return self.iterate_frames()
+
+    def __next__(self):
+        if not hasattr(self, "_frame_generator"):
+            self._frame_generator = self.iterate_frames()
+        try:
+            return next(self._frame_generator)
+        except StopIteration:
+            self._frame_generator = None
+            raise
+
+    def frame_size(self):
+        self._ensure_initialized()
+        if self.vr is not None:
+            return self.vr[0].shape[:2]
+        else:
+            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+            return height, width

+    def frames_count(self):
+        self._ensure_initialized()
+        if self.vr is not None:
+            return len(self.vr)
+        else:
+            return int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+    def fps(self):
+        self._ensure_initialized()
+        if self.vr is not None:
+            return self.vr.get_avg_fps()
+        else:
+            return int(self.cap.get(cv2.CAP_PROP_FPS))
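VideoFrameReader prefers decord and silently falls back to OpenCV, converting BGR to RGB so both paths yield equivalent arrays. A usage sketch with an illustrative local file:

from supervisely.video.video import VideoFrameReader

with VideoFrameReader("video.mp4", frame_indexes=[0, 10, 20]) as reader:
    print(reader.frame_size(), reader.frames_count(), reader.fps())
    for frame in reader:  # RGB numpy arrays for the requested indexes
        print(frame.shape)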
{supervisely-6.73.357.dist-info → supervisely-6.73.359.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.357
+Version: 6.73.359
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely
@@ -128,6 +128,7 @@ Requires-Dist: scikit-learn; extra == "tracking"
 Requires-Dist: faiss-gpu; extra == "tracking"
 Requires-Dist: tabulate; extra == "tracking"
 Requires-Dist: tensorboard; extra == "tracking"
+Requires-Dist: decord; extra == "tracking"
 Provides-Extra: training
 Requires-Dist: pycocotools; extra == "training"
 Requires-Dist: scikit-learn; extra == "training"