supervisely 6.73.410__py3-none-any.whl → 6.73.470__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of supervisely might be problematic.
- supervisely/__init__.py +136 -1
- supervisely/_utils.py +81 -0
- supervisely/annotation/json_geometries_map.py +2 -0
- supervisely/annotation/label.py +80 -3
- supervisely/api/annotation_api.py +9 -9
- supervisely/api/api.py +67 -43
- supervisely/api/app_api.py +72 -5
- supervisely/api/dataset_api.py +108 -33
- supervisely/api/entity_annotation/figure_api.py +113 -49
- supervisely/api/image_api.py +82 -0
- supervisely/api/module_api.py +10 -0
- supervisely/api/nn/deploy_api.py +15 -9
- supervisely/api/nn/ecosystem_models_api.py +201 -0
- supervisely/api/nn/neural_network_api.py +12 -3
- supervisely/api/pointcloud/pointcloud_api.py +38 -0
- supervisely/api/pointcloud/pointcloud_episode_annotation_api.py +3 -0
- supervisely/api/project_api.py +213 -6
- supervisely/api/task_api.py +11 -1
- supervisely/api/video/video_annotation_api.py +4 -2
- supervisely/api/video/video_api.py +79 -1
- supervisely/api/video/video_figure_api.py +24 -11
- supervisely/api/volume/volume_api.py +38 -0
- supervisely/app/__init__.py +1 -1
- supervisely/app/content.py +14 -6
- supervisely/app/fastapi/__init__.py +1 -0
- supervisely/app/fastapi/custom_static_files.py +1 -1
- supervisely/app/fastapi/multi_user.py +88 -0
- supervisely/app/fastapi/subapp.py +175 -42
- supervisely/app/fastapi/templating.py +1 -1
- supervisely/app/fastapi/websocket.py +77 -9
- supervisely/app/singleton.py +21 -0
- supervisely/app/v1/app_service.py +18 -2
- supervisely/app/v1/constants.py +7 -1
- supervisely/app/widgets/__init__.py +11 -1
- supervisely/app/widgets/agent_selector/template.html +1 -0
- supervisely/app/widgets/card/card.py +20 -0
- supervisely/app/widgets/dataset_thumbnail/dataset_thumbnail.py +11 -2
- supervisely/app/widgets/dataset_thumbnail/template.html +3 -1
- supervisely/app/widgets/deploy_model/deploy_model.py +750 -0
- supervisely/app/widgets/dialog/dialog.py +12 -0
- supervisely/app/widgets/dialog/template.html +2 -1
- supervisely/app/widgets/dropdown_checkbox_selector/__init__.py +0 -0
- supervisely/app/widgets/dropdown_checkbox_selector/dropdown_checkbox_selector.py +87 -0
- supervisely/app/widgets/dropdown_checkbox_selector/template.html +12 -0
- supervisely/app/widgets/ecosystem_model_selector/__init__.py +0 -0
- supervisely/app/widgets/ecosystem_model_selector/ecosystem_model_selector.py +195 -0
- supervisely/app/widgets/experiment_selector/experiment_selector.py +454 -263
- supervisely/app/widgets/fast_table/fast_table.py +713 -126
- supervisely/app/widgets/fast_table/script.js +492 -95
- supervisely/app/widgets/fast_table/style.css +54 -0
- supervisely/app/widgets/fast_table/template.html +45 -5
- supervisely/app/widgets/heatmap/__init__.py +0 -0
- supervisely/app/widgets/heatmap/heatmap.py +523 -0
- supervisely/app/widgets/heatmap/script.js +378 -0
- supervisely/app/widgets/heatmap/style.css +227 -0
- supervisely/app/widgets/heatmap/template.html +21 -0
- supervisely/app/widgets/input_tag/input_tag.py +102 -15
- supervisely/app/widgets/input_tag_list/__init__.py +0 -0
- supervisely/app/widgets/input_tag_list/input_tag_list.py +274 -0
- supervisely/app/widgets/input_tag_list/template.html +70 -0
- supervisely/app/widgets/radio_table/radio_table.py +10 -2
- supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
- supervisely/app/widgets/radio_tabs/template.html +1 -0
- supervisely/app/widgets/select/select.py +6 -4
- supervisely/app/widgets/select_dataset/select_dataset.py +6 -0
- supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +83 -7
- supervisely/app/widgets/table/table.py +68 -13
- supervisely/app/widgets/tabs/tabs.py +22 -6
- supervisely/app/widgets/tabs/template.html +5 -1
- supervisely/app/widgets/transfer/style.css +3 -0
- supervisely/app/widgets/transfer/template.html +3 -1
- supervisely/app/widgets/transfer/transfer.py +48 -45
- supervisely/app/widgets/tree_select/tree_select.py +2 -0
- supervisely/convert/image/csv/csv_converter.py +24 -15
- supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +43 -41
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +75 -51
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +137 -124
- supervisely/convert/video/video_converter.py +2 -2
- supervisely/geometry/polyline_3d.py +110 -0
- supervisely/io/env.py +161 -1
- supervisely/nn/artifacts/__init__.py +1 -1
- supervisely/nn/artifacts/artifacts.py +10 -2
- supervisely/nn/artifacts/detectron2.py +1 -0
- supervisely/nn/artifacts/hrda.py +1 -0
- supervisely/nn/artifacts/mmclassification.py +20 -0
- supervisely/nn/artifacts/mmdetection.py +5 -3
- supervisely/nn/artifacts/mmsegmentation.py +1 -0
- supervisely/nn/artifacts/ritm.py +1 -0
- supervisely/nn/artifacts/rtdetr.py +1 -0
- supervisely/nn/artifacts/unet.py +1 -0
- supervisely/nn/artifacts/utils.py +3 -0
- supervisely/nn/artifacts/yolov5.py +2 -0
- supervisely/nn/artifacts/yolov8.py +1 -0
- supervisely/nn/benchmark/semantic_segmentation/metric_provider.py +18 -18
- supervisely/nn/experiments.py +9 -0
- supervisely/nn/inference/cache.py +37 -17
- supervisely/nn/inference/gui/serving_gui_template.py +39 -13
- supervisely/nn/inference/inference.py +953 -211
- supervisely/nn/inference/inference_request.py +15 -8
- supervisely/nn/inference/instance_segmentation/instance_segmentation.py +1 -0
- supervisely/nn/inference/object_detection/object_detection.py +1 -0
- supervisely/nn/inference/predict_app/__init__.py +0 -0
- supervisely/nn/inference/predict_app/gui/__init__.py +0 -0
- supervisely/nn/inference/predict_app/gui/classes_selector.py +160 -0
- supervisely/nn/inference/predict_app/gui/gui.py +915 -0
- supervisely/nn/inference/predict_app/gui/input_selector.py +344 -0
- supervisely/nn/inference/predict_app/gui/model_selector.py +77 -0
- supervisely/nn/inference/predict_app/gui/output_selector.py +179 -0
- supervisely/nn/inference/predict_app/gui/preview.py +93 -0
- supervisely/nn/inference/predict_app/gui/settings_selector.py +881 -0
- supervisely/nn/inference/predict_app/gui/tags_selector.py +110 -0
- supervisely/nn/inference/predict_app/gui/utils.py +399 -0
- supervisely/nn/inference/predict_app/predict_app.py +176 -0
- supervisely/nn/inference/session.py +47 -39
- supervisely/nn/inference/tracking/bbox_tracking.py +5 -1
- supervisely/nn/inference/tracking/point_tracking.py +5 -1
- supervisely/nn/inference/tracking/tracker_interface.py +4 -0
- supervisely/nn/inference/uploader.py +9 -5
- supervisely/nn/model/model_api.py +44 -22
- supervisely/nn/model/prediction.py +15 -1
- supervisely/nn/model/prediction_session.py +70 -14
- supervisely/nn/prediction_dto.py +7 -0
- supervisely/nn/tracker/__init__.py +6 -8
- supervisely/nn/tracker/base_tracker.py +54 -0
- supervisely/nn/tracker/botsort/__init__.py +1 -0
- supervisely/nn/tracker/botsort/botsort_config.yaml +30 -0
- supervisely/nn/tracker/botsort/osnet_reid/__init__.py +0 -0
- supervisely/nn/tracker/botsort/osnet_reid/osnet.py +566 -0
- supervisely/nn/tracker/botsort/osnet_reid/osnet_reid_interface.py +88 -0
- supervisely/nn/tracker/botsort/tracker/__init__.py +0 -0
- supervisely/nn/tracker/{bot_sort → botsort/tracker}/basetrack.py +1 -2
- supervisely/nn/tracker/{utils → botsort/tracker}/gmc.py +51 -59
- supervisely/nn/tracker/{deep_sort/deep_sort → botsort/tracker}/kalman_filter.py +71 -33
- supervisely/nn/tracker/botsort/tracker/matching.py +202 -0
- supervisely/nn/tracker/{bot_sort/bot_sort.py → botsort/tracker/mc_bot_sort.py} +68 -81
- supervisely/nn/tracker/botsort_tracker.py +273 -0
- supervisely/nn/tracker/calculate_metrics.py +264 -0
- supervisely/nn/tracker/utils.py +273 -0
- supervisely/nn/tracker/visualize.py +520 -0
- supervisely/nn/training/gui/gui.py +152 -49
- supervisely/nn/training/gui/hyperparameters_selector.py +1 -1
- supervisely/nn/training/gui/model_selector.py +8 -6
- supervisely/nn/training/gui/train_val_splits_selector.py +144 -71
- supervisely/nn/training/gui/training_artifacts.py +3 -1
- supervisely/nn/training/train_app.py +225 -46
- supervisely/project/pointcloud_episode_project.py +12 -8
- supervisely/project/pointcloud_project.py +12 -8
- supervisely/project/project.py +221 -75
- supervisely/template/experiment/experiment.html.jinja +105 -55
- supervisely/template/experiment/experiment_generator.py +258 -112
- supervisely/template/experiment/header.html.jinja +31 -13
- supervisely/template/experiment/sly-style.css +7 -2
- supervisely/versions.json +3 -1
- supervisely/video/sampling.py +42 -20
- supervisely/video/video.py +41 -12
- supervisely/video_annotation/video_figure.py +38 -4
- supervisely/volume/stl_converter.py +2 -0
- supervisely/worker_api/agent_rpc.py +24 -1
- supervisely/worker_api/rpc_servicer.py +31 -7
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/METADATA +22 -14
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/RECORD +167 -148
- supervisely_lib/__init__.py +6 -1
- supervisely/app/widgets/experiment_selector/style.css +0 -27
- supervisely/app/widgets/experiment_selector/template.html +0 -61
- supervisely/nn/tracker/bot_sort/__init__.py +0 -21
- supervisely/nn/tracker/bot_sort/fast_reid_interface.py +0 -152
- supervisely/nn/tracker/bot_sort/matching.py +0 -127
- supervisely/nn/tracker/bot_sort/sly_tracker.py +0 -401
- supervisely/nn/tracker/deep_sort/__init__.py +0 -6
- supervisely/nn/tracker/deep_sort/deep_sort/__init__.py +0 -1
- supervisely/nn/tracker/deep_sort/deep_sort/detection.py +0 -49
- supervisely/nn/tracker/deep_sort/deep_sort/iou_matching.py +0 -81
- supervisely/nn/tracker/deep_sort/deep_sort/linear_assignment.py +0 -202
- supervisely/nn/tracker/deep_sort/deep_sort/nn_matching.py +0 -176
- supervisely/nn/tracker/deep_sort/deep_sort/track.py +0 -166
- supervisely/nn/tracker/deep_sort/deep_sort/tracker.py +0 -145
- supervisely/nn/tracker/deep_sort/deep_sort.py +0 -301
- supervisely/nn/tracker/deep_sort/generate_clip_detections.py +0 -90
- supervisely/nn/tracker/deep_sort/preprocessing.py +0 -70
- supervisely/nn/tracker/deep_sort/sly_tracker.py +0 -273
- supervisely/nn/tracker/tracker.py +0 -285
- supervisely/nn/tracker/utils/kalman_filter.py +0 -492
- supervisely/nn/tracking/__init__.py +0 -1
- supervisely/nn/tracking/boxmot.py +0 -114
- supervisely/nn/tracking/tracking.py +0 -24
- /supervisely/{nn/tracker/utils → app/widgets/deploy_model}/__init__.py +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/LICENSE +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/WHEEL +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.410.dist-info → supervisely-6.73.470.dist-info}/top_level.txt +0 -0
supervisely/geometry/polyline_3d.py ADDED
@@ -0,0 +1,110 @@
+from supervisely.geometry.geometry import Geometry
+from supervisely.geometry.constants import LABELER_LOGIN, UPDATED_AT, CREATED_AT, ID, CLASS_ID
+from supervisely.geometry.cuboid_3d import Vector3d
+from typing import List, Union
+
+
+class Polyline3D(Geometry):
+    """
+    Polyline3D geometry
+
+    :param points: List of 3D point coordinates which define the polyline in 3D space.
+    :type points: List[List[int, int, int]]
+    :param sly_id: Polyline ID in Supervisely server.
+    :type sly_id: int, optional
+    :param class_id: ID of :class:`ObjClass<supervisely.annotation.obj_class.ObjClass>` to which Polyline belongs.
+    :type class_id: int, optional
+    :param labeler_login: Login of the user who created Polyline.
+    :type labeler_login: str, optional
+    :param updated_at: Date and Time when Polyline was modified last. Date Format: Year:Month:Day:Hour:Minute:Seconds. Example: '2021-01-22T19:37:50.158Z'.
+    :type updated_at: str, optional
+    :param created_at: Date and Time when Polyline was created. Date Format is the same as in "updated_at" parameter.
+    :type created_at: str, optional
+
+    :Usage example:
+
+     .. code-block:: python
+
+        import supervisely as sly
+
+        points = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+        figure = sly.Polyline(points)
+    """
+
+    @staticmethod
+    def geometry_name():
+        return "polyline_3d"
+
+    def __init__(
+        self,
+        points: Union[List[float], List[Vector3d]],
+        sly_id=None,
+        class_id=None,
+        labeler_login=None,
+        updated_at=None,
+        created_at=None,
+    ):
+        if not isinstance(points[0], Vector3d):
+            points = [Vector3d(point[0], point[1], point[2]) for point in points]
+        super().__init__(
+            sly_id=sly_id,
+            class_id=class_id,
+            labeler_login=labeler_login,
+            updated_at=updated_at,
+            created_at=created_at,
+        )
+
+        self._points = points
+
+    @property
+    def points(self):
+        return self._points
+
+    def to_json(self):
+        points = [[point.x, point.y, point.z] for point in self._points]
+        res = {"points": points}
+        self._add_creation_info(res)
+        return res
+
+    @classmethod
+    def from_json(cls, data):
+        """
+        Convert a json dict to Polyline3D.
+
+        :param data: Polyline3D in json format as a dict.
+        :type data: dict
+        :return: Polyline3D object
+        :rtype: :class:`Polyline3D<Polyline3D>`
+        :Usage example:
+
+         .. code-block:: python
+
+            import supervisely as sly
+
+            figure_json = {
+                "points": {
+                    [
+                        [1, 2, 3],
+                        [4, 5, 6],
+                        [7, 8, 9]
+                    ],
+                }
+            }
+            figure = sly.Polyline3D.from_json(figure_json)
+        """
+        if not data.get("points"):
+            raise ValueError("Data dict must contain 'points' field!")
+        points = data["points"]
+        labeler_login = data.get(LABELER_LOGIN, None)
+        updated_at = data.get(UPDATED_AT, None)
+        created_at = data.get(CREATED_AT, None)
+        sly_id = data.get(ID, None)
+        class_id = data.get(CLASS_ID, None)
+        return cls(
+            points,
+            sly_id=sly_id,
+            class_id=class_id,
+            labeler_login=labeler_login,
+            updated_at=updated_at,
+            created_at=created_at,
+        )
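
Usage sketch (not part of the diff): the new Polyline3D geometry can be built from raw XYZ triplets and round-tripped through JSON. The constructor, to_json and from_json signatures follow the added file above; the x/y/z attribute access assumes the same Vector3d properties that to_json() itself uses.

    from supervisely.geometry.polyline_3d import Polyline3D

    # Raw coordinates are converted to Vector3d instances by the constructor.
    points = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    line = Polyline3D(points)

    data = line.to_json()  # {"points": [[1, 2, 3], [4, 5, 6], [7, 8, 9]], ...creation info if set}
    restored = Polyline3D.from_json(data)
    print(restored.points[0].x, restored.points[0].y, restored.points[0].z)  # 1 2 3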
supervisely/io/env.py CHANGED
@@ -1,9 +1,14 @@
 # coding: utf-8
+import json
 import os
+from contextvars import ContextVar, Token
 from typing import Callable, List, Literal, Optional, Union
 
 RAISE_IF_NOT_FOUND = True
-
+_MULTIUSER_USER_CTX: ContextVar[Optional[Union[int, str]]] = ContextVar(
+    "supervisely_multiuser_app_user_id",
+    default=None,
+)
 
 def flag_from_env(s: str) -> bool:
     """Returns True if passed string is a flag, False otherwise.
@@ -686,3 +691,158 @@ def configure_minimum_instance_version() -> None:
     latest_version = get_latest_instance_version_from_json()
     if latest_version:
         os.environ["MINIMUM_INSTANCE_VERSION_FOR_SDK"] = latest_version
+
+def app_categories(raise_not_found: Optional[bool] = False) -> list:
+    """Returns a list of app categories from environment variable using following keys:
+    - APP_CATEGORIES
+    :param raise_not_found: if True, raises KeyError if app category is not found in environment variables
+    :type raise_not_found: Optional[bool]
+    :return: app categories
+    :rtype: list
+    """
+    return _parse_from_env(
+        name="app_category",
+        keys=["APP_CATEGORIES"],
+        postprocess_fn=lambda x: json.loads(x),
+        default=[],
+        raise_not_found=raise_not_found,
+    )
+
+
+def upload_count(raise_not_found: Optional[bool] = False) -> dict:
+    """Returns a dictionary of upload counts from environment variable using following
+    - UPLOAD_COUNT
+    :param raise_not_found: if True, raises KeyError if upload count is not found in environment variables
+    :type raise_not_found: Optional[bool]
+    :return: upload count
+    :rtype: dict
+    """
+    return _parse_from_env(
+        name="upload_count",
+        keys=["UPLOAD_COUNT"],
+        postprocess_fn=lambda x: json.loads(x),
+        default={},
+        raise_not_found=raise_not_found,
+    )
+
+
+def uploaded_ids(raise_not_found: Optional[bool] = False) -> dict:
+    """Returns a dictionary with dataset IDs as keys and lists of uploaded IDs as values from environment variable using following
+    - UPLOADED_IDS
+    :param raise_not_found: if True, raises KeyError if uploaded IDs is not found in environment variables
+    :type raise_not_found: Optional[bool]
+    :return: uploaded IDs
+    :rtype: dict
+    """
+    return _parse_from_env(
+        name="uploaded_ids",
+        keys=["UPLOADED_IDS"],
+        postprocess_fn=lambda x: json.loads(x),
+        default={},
+        raise_not_found=raise_not_found,
+    )
+
+
+def increment_upload_count(dataset_id: int, count: int = 1) -> None:
+    """Increments the upload count for the given dataset id by the specified count.
+
+    :param dataset_id: The dataset id to increment the upload count for.
+    :type dataset_id: int
+    :param count: The amount to increment the upload count by. Defaults to 1.
+    :type count: int
+    """
+    upload_info = upload_count()
+    upload_info[str(dataset_id)] = upload_info.get(str(dataset_id), 0) + count
+    os.environ["UPLOAD_COUNT"] = json.dumps(upload_info)
+
+
+def add_uploaded_ids_to_env(dataset_id: int, ids: List[int]) -> None:
+    """Adds the list of uploaded IDs to the environment variable for the given dataset ID.
+
+    :param dataset_id: The dataset ID to associate the uploaded IDs with.
+    :type dataset_id: int
+    :param ids: The list of uploaded IDs to add.
+    :type ids: List[int]
+    """
+    uploaded = uploaded_ids()
+    if str(dataset_id) not in uploaded:
+        uploaded[str(dataset_id)] = []
+    existing_ids = set(uploaded[str(dataset_id)])
+    if set(ids).intersection(existing_ids):
+        for _id in ids:
+            if _id not in existing_ids:
+                uploaded[str(dataset_id)].append(_id)
+    else:
+        uploaded[str(dataset_id)].extend(ids)
+    os.environ["UPLOADED_IDS"] = json.dumps(uploaded)
+
+
+def is_multiuser_mode_enabled() -> bool:
+    """Returns multiuser app mode flag from environment variable using following keys:
+    - SUPERVISELY_MULTIUSER_APP_MODE
+    :return: multiuser app mode flag
+    :rtype: bool
+    """
+    return _parse_from_env(
+        name="is_multiuser_mode_enabled",
+        keys=["SUPERVISELY_MULTIUSER_APP_MODE"],
+        default=False,
+        raise_not_found=False,
+        postprocess_fn=flag_from_env,
+    )
+
+
+def enable_multiuser_app_mode() -> None:
+    """
+    Enables multiuser app mode by setting the environment variable.
+    This function can be used to activate multiuser mode in the application allowing
+    separation of user DataJson/StateJson.
+    """
+    os.environ["SUPERVISELY_MULTIUSER_APP_MODE"] = "true"
+
+
+def disable_multiuser_app_mode() -> None:
+    """Disables multiuser app mode by removing the environment variable."""
+    os.environ.pop("SUPERVISELY_MULTIUSER_APP_MODE", None)
+
+
+def set_user_for_multiuser_app(user_id: Optional[Union[int, str]]) -> Token:
+    """
+    Sets the user ID for multiuser app mode by setting the environment variable.
+    This function should be used in multiuser mode to separate user DataJson/StateJson.
+
+    :param user_id: The user ID (or session key) to set for the current request.
+    :type user_id: int | str
+    :return: A context token that can be used to reset the user ID later.
+    :rtype: Token
+    :raises RuntimeError: If multiuser app mode is not enabled.
+    """
+    if not is_multiuser_mode_enabled():
+        raise RuntimeError("Multiuser app mode is not enabled. Cannot set user ID.")
+    return _MULTIUSER_USER_CTX.set(user_id)
+
+
+def reset_user_for_multiuser_app(token: Token) -> None:
+    """
+    Resets the user ID for multiuser app mode using the provided context token.
+
+    :param token: Context token obtained from `set_user_for_multiuser_app`.
+    :type token: Token
+    """
+    if not is_multiuser_mode_enabled():
+        return
+    _MULTIUSER_USER_CTX.reset(token)
+
+
+def user_from_multiuser_app() -> Optional[Union[int, str]]:
+    """
+    Retrieves the user ID for multiuser app mode from the environment variable.
+
+    :return: The user ID if set, otherwise None.
+    :rtype: Optional[Union[int, str]]
+    """
+    if not is_multiuser_mode_enabled():
+        return None
+    user_id = _MULTIUSER_USER_CTX.get(None)
+    if user_id is not None:
+        return user_id
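
Usage sketch (not part of the diff): together these helpers implement a per-request user context for multi-user apps, backed by a ContextVar rather than a plain global. The request wrapper below is illustrative; only the env helpers themselves come from the diff above.

    import supervisely.io.env as sly_env

    # Opt the app into multi-user mode (sets SUPERVISELY_MULTIUSER_APP_MODE=true).
    sly_env.enable_multiuser_app_mode()

    def handle_request(user_id: int):
        # Bind the user to the current request so per-user DataJson/StateJson can be resolved.
        token = sly_env.set_user_for_multiuser_app(user_id)
        try:
            current = sly_env.user_from_multiuser_app()  # returns user_id while the token is active
            ...  # do per-user work here
        finally:
            sly_env.reset_user_for_multiuser_app(token)  # restore the previous context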
supervisely/nn/artifacts/__init__.py CHANGED
@@ -1,6 +1,6 @@
 from supervisely.nn.artifacts.detectron2 import Detectron2
 from supervisely.nn.artifacts.hrda import HRDA
-from supervisely.nn.artifacts.mmclassification import MMClassification
+from supervisely.nn.artifacts.mmclassification import MMClassification, MMPretrain
 from supervisely.nn.artifacts.mmdetection import MMDetection, MMDetection3
 from supervisely.nn.artifacts.mmsegmentation import MMSegmentation
 from supervisely.nn.artifacts.ritm import RITM
supervisely/nn/artifacts/artifacts.py CHANGED
@@ -68,6 +68,7 @@ class BaseTrainArtifacts:
         self._pattern: str = None
         self._available_task_types: List[str] = []
         self._require_runtime = False
+        self._has_benchmark_evaluation = False
 
     @property
     def team_id(self) -> int:
@@ -209,6 +210,13 @@ class BaseTrainArtifacts:
         """
         return self._require_runtime
 
+    @property
+    def has_benchmark_evaluation(self):
+        """
+        Whether the framework has integrated benchmark evaluation.
+        """
+        return self._has_benchmark_evaluation
+
     def is_valid_artifacts_path(self, path):
         """
         Check if the provided path is valid and follows specified session path pattern.
@@ -610,9 +618,9 @@
         date_time = parsed_datetime.strftime("%Y-%m-%d %H:%M:%S")
 
         experiment_info_data = {
-            "experiment_name": f"
+            "experiment_name": f"{self.framework_name} experiment",
             "framework_name": self.framework_name,
-            "model_name": f"
+            "model_name": f"{self.framework_name} model",
             "task_type": train_info.task_type,
             "project_id": project_id,
             "task_id": train_info.task_id,
supervisely/nn/artifacts/detectron2.py CHANGED
@@ -25,6 +25,7 @@ class Detectron2(BaseTrainArtifacts):
         self._pattern = re_compile(r"^/detectron2/\d+_[^/]+/?$")
         self._available_task_types: List[str] = ["instance segmentation"]
         self._require_runtime = False
+        self._has_benchmark_evaluation = False
 
     def get_task_id(self, artifacts_folder: str) -> str:
         parts = artifacts_folder.split("/")
supervisely/nn/artifacts/hrda.py CHANGED
@@ -20,6 +20,7 @@ class HRDA(BaseTrainArtifacts):
         # self._config_file = "config.py"
         # self._available_task_types: List[str] = ["semantic segmentation"]
         # self._require_runtime = False
+        # self._has_benchmark_evaluation = False
 
     def get_task_id(self, artifacts_folder: str) -> str:
         raise NotImplementedError
supervisely/nn/artifacts/mmclassification.py CHANGED
@@ -21,6 +21,7 @@ class MMClassification(BaseTrainArtifacts):
         self._pattern = re_compile(r"^/mmclassification/\d+_[^/]+/?$")
         self._available_task_types: List[str] = ["classification"]
         self._require_runtime = False
+        self._has_benchmark_evaluation = False
 
     def get_task_id(self, artifacts_folder: str) -> str:
         parts = artifacts_folder.split("/")
@@ -44,3 +45,22 @@
 
     def get_config_path(self, artifacts_folder: str) -> str:
         return None
+
+
+class MMPretrain(MMClassification):
+    def __init__(self, team_id: int):
+        super().__init__(team_id)
+
+        self._app_name = "Train MMPretrain"
+        self._slug = "supervisely-ecosystem/mmpretrain/supervisely/train"
+        self._serve_app_name = "Serve MMPretrain"
+        self._serve_slug = "supervisely-ecosystem/mmpretrain/supervisely/serve"
+        self._framework_name = "MMPretrain"
+        self._framework_folder = "/mmclassification-v2"
+        self._weights_folder = "checkpoints"
+        self._task_type = "classification"
+        self._weights_ext = ".pth"
+        self._pattern = re_compile(r"^/mmclassification-v2/\d+_[^/]+/?$")
+        self._available_task_types: List[str] = ["classification"]
+        self._require_runtime = False
+        self._has_benchmark_evaluation = False
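
Usage sketch (not part of the diff): MMPretrain is registered as a separate framework that reads artifacts from /mmclassification-v2 while reusing the MMClassification parsing logic, and the new has_benchmark_evaluation property comes from BaseTrainArtifacts. The team_id value below is illustrative.

    from supervisely.nn.artifacts.mmclassification import MMPretrain

    artifacts = MMPretrain(team_id=42)         # team_id is illustrative
    print(artifacts.framework_name)            # "MMPretrain"
    print(artifacts.has_benchmark_evaluation)  # False for this framework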
supervisely/nn/artifacts/mmdetection.py CHANGED
@@ -26,6 +26,7 @@ class MMDetection(BaseTrainArtifacts):
         self._pattern = re_compile(r"^/mmdetection/\d+_[^/]+/?$")
         self._available_task_types: List[str] = ["object detection", "instance segmentation"]
         self._require_runtime = False
+        self._has_benchmark_evaluation = False
 
     def get_task_id(self, artifacts_folder: str) -> str:
         parts = artifacts_folder.split("/")
@@ -63,8 +64,8 @@ class MMDetection3(BaseTrainArtifacts):
         super().__init__(team_id)
 
         self._app_name = "Train MMDetection 3.0"
-        self._slug = "
-        self._serve_app_name = "
+        self._slug = "supervisely-ecosystem/train-mmdetection-v3"
+        self._serve_app_name = "Serve MMDetection 3.0"
         self._serve_slug = "supervisely-ecosystem/serve-mmdetection-v3"
         self._framework_name = "MMDetection 3.0"
         self._framework_folder = "/mmdetection-3"
@@ -75,7 +76,8 @@ class MMDetection3(BaseTrainArtifacts):
         self._pattern = re_compile(r"^/mmdetection-3/\d+_[^/]+/?$")
         self._available_task_types: List[str] = ["object detection", "instance segmentation"]
         self._require_runtime = False
-
+        self._has_benchmark_evaluation = True
+
     def get_task_id(self, artifacts_folder: str) -> str:
         parts = artifacts_folder.split("/")
         if len(parts) < 3:
supervisely/nn/artifacts/mmsegmentation.py CHANGED
@@ -22,6 +22,7 @@ class MMSegmentation(BaseTrainArtifacts):
         self._pattern = re_compile(r"^/mmsegmentation/\d+_[^/]+/?$")
         self._available_task_types: List[str] = ["instance segmentation"]
         self._require_runtime = False
+        self._has_benchmark_evaluation = True
 
     def get_task_id(self, artifacts_folder: str) -> str:
         return artifacts_folder.split("/")[2].split("_")[0]
supervisely/nn/artifacts/ritm.py CHANGED
@@ -22,6 +22,7 @@ class RITM(BaseTrainArtifacts):
         self._pattern = re_compile(r"^/RITM_training/\d+_[^/]+/?$")
         self._available_task_types: List[str] = ["interactive segmentation"]
         self._require_runtime = False
+        self._has_benchmark_evaluation = False
 
     def get_task_id(self, artifacts_folder: str) -> str:
         parts = artifacts_folder.split("/")
supervisely/nn/artifacts/rtdetr.py CHANGED
@@ -22,6 +22,7 @@ class RTDETR(BaseTrainArtifacts):
         self._pattern = re_compile(r"^/RT-DETR/[^/]+/\d+/?$")
         self._available_task_types: List[str] = ["object detection"]
         self._require_runtime = False
+        self._has_benchmark_evaluation = True
 
     def get_task_id(self, artifacts_folder: str) -> str:
         return artifacts_folder.split("/")[-1]
supervisely/nn/artifacts/unet.py CHANGED
@@ -22,6 +22,7 @@ class UNet(BaseTrainArtifacts):
         self._pattern = re_compile(r"^/unet/\d+_[^/]+/?$")
         self._available_task_types: List[str] = ["semantic segmentation"]
         self._require_runtime = False
+        self._has_benchmark_evaluation = True
 
     def get_task_id(self, artifacts_folder: str) -> str:
         parts = artifacts_folder.split("/")
supervisely/nn/artifacts/utils.py CHANGED
@@ -4,6 +4,7 @@ from supervisely.nn.artifacts import (
     YOLOv5v2,
     YOLOv8,
     MMClassification,
+    MMPretrain,
     MMSegmentation,
     MMDetection,
     MMDetection3,
@@ -19,6 +20,7 @@ class FrameworkName:
     YOLOV5V2 = "YOLOv5 2.0"
     YOLOV8 = "YOLOv8+"
     MMCLASSIFICATION = "MMClassification"
+    MMPRETRAIN = "MMPretrain"
     MMSEGMENTATION = "MMSegmentation"
     MMDETECTION = "MMDetection"
     MMDETECTION3 = "MMDetection 3.0"
@@ -34,6 +36,7 @@ class FrameworkMapper:
         FrameworkName.YOLOV5V2: YOLOv5v2,
         FrameworkName.YOLOV8: YOLOv8,
         FrameworkName.MMCLASSIFICATION: MMClassification,
+        FrameworkName.MMPRETRAIN: MMPretrain,
         FrameworkName.MMSEGMENTATION: MMSegmentation,
         FrameworkName.MMDETECTION: MMDetection,
         FrameworkName.MMDETECTION3: MMDetection3,
supervisely/nn/artifacts/yolov5.py CHANGED
@@ -22,6 +22,7 @@ class YOLOv5(BaseTrainArtifacts):
         self._pattern = re_compile(r"^/yolov5_train/[^/]+/\d+/?$")
         self._available_task_types: List[str] = ["object detection"]
         self._require_runtime = False
+        self._has_benchmark_evaluation = False
 
     def get_task_id(self, artifacts_folder: str) -> str:
         return artifacts_folder.split("/")[-1]
@@ -55,3 +56,4 @@ class YOLOv5v2(YOLOv5):
         self._config_file = None
         self._pattern = re_compile(r"^/yolov5_2.0_train/[^/]+/\d+/?$")
         self._available_task_types: List[str] = ["object detection"]
+        self._has_benchmark_evaluation = False
supervisely/nn/benchmark/semantic_segmentation/metric_provider.py CHANGED
@@ -70,24 +70,24 @@ class MetricProvider:
 
     def json_metrics(self):
         return {
-            "mIoU": self.eval_data.loc["mean"]["IoU"]
-            "mE_boundary_oU": self.eval_data.loc["mean"]["E_boundary_oU"]
-            "mFP_boundary_oU": self.eval_data.loc["mean"]["FP_boundary_oU"]
-            "mFN_boundary_oU": self.eval_data.loc["mean"]["FN_boundary_oU"]
-            "mE_boundary_oU_renormed": self.eval_data.loc["mean"]["E_boundary_oU_renormed"]
-            "mE_extent_oU": self.eval_data.loc["mean"]["E_extent_oU"]
-            "mFP_extent_oU": self.eval_data.loc["mean"]["FP_extent_oU"]
-            "mFN_extent_oU": self.eval_data.loc["mean"]["FN_extent_oU"]
-            "mE_extent_oU_renormed": self.eval_data.loc["mean"]["E_extent_oU_renormed"]
-            "mE_segment_oU": self.eval_data.loc["mean"]["E_segment_oU"]
-            "mFP_segment_oU": self.eval_data.loc["mean"]["FP_segment_oU"]
-            "mFN_segment_oU": self.eval_data.loc["mean"]["FN_segment_oU"]
-            "mE_segment_oU_renormed": self.eval_data.loc["mean"]["E_segment_oU_renormed"]
-            "mPrecision": self.eval_data.loc["mean"]["precision"]
-            "mRecall": self.eval_data.loc["mean"]["recall"]
-            "mF1_score": self.eval_data.loc["mean"]["F1_score"]
-            "PixelAcc": self.pixel_accuracy
-            "mBoundaryIoU": self.eval_data.loc["mean"]["boundary_IoU"]
+            "mIoU": self.eval_data.loc["mean"]["IoU"],
+            "mE_boundary_oU": self.eval_data.loc["mean"]["E_boundary_oU"],
+            "mFP_boundary_oU": self.eval_data.loc["mean"]["FP_boundary_oU"],
+            "mFN_boundary_oU": self.eval_data.loc["mean"]["FN_boundary_oU"],
+            "mE_boundary_oU_renormed": self.eval_data.loc["mean"]["E_boundary_oU_renormed"],
+            "mE_extent_oU": self.eval_data.loc["mean"]["E_extent_oU"],
+            "mFP_extent_oU": self.eval_data.loc["mean"]["FP_extent_oU"],
+            "mFN_extent_oU": self.eval_data.loc["mean"]["FN_extent_oU"],
+            "mE_extent_oU_renormed": self.eval_data.loc["mean"]["E_extent_oU_renormed"],
+            "mE_segment_oU": self.eval_data.loc["mean"]["E_segment_oU"],
+            "mFP_segment_oU": self.eval_data.loc["mean"]["FP_segment_oU"],
+            "mFN_segment_oU": self.eval_data.loc["mean"]["FN_segment_oU"],
+            "mE_segment_oU_renormed": self.eval_data.loc["mean"]["E_segment_oU_renormed"],
+            "mPrecision": self.eval_data.loc["mean"]["precision"],
+            "mRecall": self.eval_data.loc["mean"]["recall"],
+            "mF1_score": self.eval_data.loc["mean"]["F1_score"],
+            "PixelAcc": self.pixel_accuracy,
+            "mBoundaryIoU": self.eval_data.loc["mean"]["boundary_IoU"],
         }
 
     def metric_table(self):
supervisely/nn/experiments.py CHANGED
@@ -54,6 +54,8 @@ class ExperimentInfo:
     """Number of images in the validation set"""
     datetime: Optional[str] = None
     """Date and time when the experiment was started"""
+    experiment_report_id: Optional[int] = None
+    """ID of the experiment report"""
     evaluation_report_id: Optional[int] = None
     """ID of the model benchmark evaluation report"""
     evaluation_report_link: Optional[str] = None
@@ -62,6 +64,12 @@ class ExperimentInfo:
     """Evaluation metrics"""
     logs: Optional[dict] = None
     """Dictionary with link and type of logger"""
+    train_collection_id: Optional[int] = None
+    """ID of the collection with train images"""
+    val_collection_id: Optional[int] = None
+    """ID of the collection with validation images"""
+    project_version: Optional[int] = None
+    """Version of the project"""
 
     def __init__(self, **kwargs):
         required_fieds = {
@@ -82,6 +90,7 @@ class ExperimentInfo:
         for field in fields(self.__class__):
             value = getattr(self, field.name)
             data[field.name] = value
+        return data
 
 
 def get_experiment_infos(api: Api, team_id: int, framework_name: str) -> List[ExperimentInfo]:
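
Usage sketch (not part of the diff): the new ExperimentInfo fields are optional, so records written by older releases still load and simply report None. Reading them through the existing get_experiment_infos helper (the team ID and framework name below are illustrative):

    from supervisely.api.api import Api
    from supervisely.nn.experiments import get_experiment_infos

    api = Api.from_env()
    for info in get_experiment_infos(api, team_id=42, framework_name="MMDetection 3.0"):
        # All four fields were added in this release and default to None when absent.
        print(info.experiment_report_id, info.train_collection_id,
              info.val_collection_id, info.project_version)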
supervisely/nn/inference/cache.py CHANGED
@@ -771,7 +771,7 @@ class InferenceImageCache:
     def _download_many(
         self,
         indexes: List[Union[int, str]],
-
+        name_constructor: Callable[[int], str],
        load_generator: Callable[
            [List[int]],
            Generator[Tuple[Union[int, str], np.ndarray], None, None],
@@ -785,24 +785,42 @@ class InferenceImageCache:
         all_frames = [None for _ in range(len(indexes))]
 
         def get_one_image(item):
-            pos,
+            pos, hash_or_id = item
             if video_id in self._cache:
-
-
+                try:
+                    frame = self.get_frame_from_cache(video_id, hash_or_id)
+                except Exception as e:
+                    logger.error(
+                        f"Error retrieving frame from cache: {repr(e)}. Frame will be re-downloaded",
+                        exc_info=True,
+                    )
+                    ids_to_load.append(hash_or_id)
+                    return pos, None
+                return pos, frame
+            try:
+                image = self._cache.get_image(name_constructor(hash_or_id))
+            except Exception as e:
+                logger.error(
+                    f"Error retrieving image from cache: {repr(e)}. Image will be re-downloaded",
+                    exc_info=True,
+                )
+                ids_to_load.append(hash_or_id)
+                return pos, None
+            return pos, image
 
         position = 0
         batch_size = 4
         for batch in batched(indexes, batch_size):
-
+            ids_to_load = []
             items = []
             for hash_or_id in batch:
-                name =
+                name = name_constructor(hash_or_id)
                 self._wait_if_in_queue(name, logger)
-
+                pos_by_name[name] = position
                 if name not in self._cache and video_id not in self._cache:
                     self._load_queue.set(name, hash_or_id)
-
-
+                    ids_to_load.append(hash_or_id)
+
                 elif return_images is True:
                     items.append((position, hash_or_id))
                 position += 1
@@ -810,14 +828,16 @@ class InferenceImageCache:
             if len(items) > 0:
                 with ThreadPoolExecutor(min(64, len(items))) as executor:
                     for pos, image in executor.map(get_one_image, items):
+                        if image is None:
+                            continue
                         all_frames[pos] = image
                         if progress_cb is not None:
                             progress_cb()
 
             download_time = time.monotonic()
-            if len(
-            for id_or_hash, image in load_generator(
-                name =
+            if len(ids_to_load) > 0:
+                for id_or_hash, image in load_generator(ids_to_load):
+                    name = name_constructor(id_or_hash)
                     self._add_to_cache(name, image)
 
                     if return_images:
@@ -828,13 +848,13 @@ class InferenceImageCache:
             download_time = time.monotonic() - download_time
 
             # logger.debug(f"All stored files: {sorted(os.listdir(self.tmp_path))}")
-            if
-
+            if ids_to_load:
+                ids_to_load = list(ids_to_load)
                 logger.debug(
-                    f"Images/Frames added to cache: {
-                    extra={"indexes":
+                    f"Images/Frames added to cache: {ids_to_load} in {download_time:.2f} sec",
+                    extra={"indexes": ids_to_load, "download_time": download_time},
                 )
-            found = set(batch).difference(
+            found = set(batch).difference(ids_to_load)
             if found:
                 logger.debug(f"Images/Frames found in cache: {list(found)}")
 