supervisely 6.73.452__py3-none-any.whl → 6.73.513__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (189)
  1. supervisely/__init__.py +25 -1
  2. supervisely/annotation/annotation.py +8 -2
  3. supervisely/annotation/json_geometries_map.py +13 -12
  4. supervisely/api/annotation_api.py +6 -3
  5. supervisely/api/api.py +2 -0
  6. supervisely/api/app_api.py +10 -1
  7. supervisely/api/dataset_api.py +74 -12
  8. supervisely/api/entities_collection_api.py +10 -0
  9. supervisely/api/entity_annotation/figure_api.py +28 -0
  10. supervisely/api/entity_annotation/object_api.py +3 -3
  11. supervisely/api/entity_annotation/tag_api.py +63 -12
  12. supervisely/api/guides_api.py +210 -0
  13. supervisely/api/image_api.py +4 -0
  14. supervisely/api/labeling_job_api.py +83 -1
  15. supervisely/api/labeling_queue_api.py +33 -7
  16. supervisely/api/module_api.py +5 -0
  17. supervisely/api/project_api.py +71 -26
  18. supervisely/api/storage_api.py +3 -1
  19. supervisely/api/task_api.py +13 -2
  20. supervisely/api/team_api.py +4 -3
  21. supervisely/api/video/video_annotation_api.py +119 -3
  22. supervisely/api/video/video_api.py +65 -14
  23. supervisely/app/__init__.py +1 -1
  24. supervisely/app/content.py +23 -7
  25. supervisely/app/development/development.py +18 -2
  26. supervisely/app/fastapi/__init__.py +1 -0
  27. supervisely/app/fastapi/custom_static_files.py +1 -1
  28. supervisely/app/fastapi/multi_user.py +105 -0
  29. supervisely/app/fastapi/subapp.py +88 -42
  30. supervisely/app/fastapi/websocket.py +77 -9
  31. supervisely/app/singleton.py +21 -0
  32. supervisely/app/v1/app_service.py +18 -2
  33. supervisely/app/v1/constants.py +7 -1
  34. supervisely/app/widgets/__init__.py +6 -0
  35. supervisely/app/widgets/activity_feed/__init__.py +0 -0
  36. supervisely/app/widgets/activity_feed/activity_feed.py +239 -0
  37. supervisely/app/widgets/activity_feed/style.css +78 -0
  38. supervisely/app/widgets/activity_feed/template.html +22 -0
  39. supervisely/app/widgets/card/card.py +20 -0
  40. supervisely/app/widgets/classes_list_selector/classes_list_selector.py +121 -9
  41. supervisely/app/widgets/classes_list_selector/template.html +60 -93
  42. supervisely/app/widgets/classes_mapping/classes_mapping.py +13 -12
  43. supervisely/app/widgets/classes_table/classes_table.py +1 -0
  44. supervisely/app/widgets/deploy_model/deploy_model.py +56 -35
  45. supervisely/app/widgets/ecosystem_model_selector/ecosystem_model_selector.py +1 -1
  46. supervisely/app/widgets/experiment_selector/experiment_selector.py +8 -0
  47. supervisely/app/widgets/fast_table/fast_table.py +184 -60
  48. supervisely/app/widgets/fast_table/template.html +1 -1
  49. supervisely/app/widgets/heatmap/__init__.py +0 -0
  50. supervisely/app/widgets/heatmap/heatmap.py +564 -0
  51. supervisely/app/widgets/heatmap/script.js +533 -0
  52. supervisely/app/widgets/heatmap/style.css +233 -0
  53. supervisely/app/widgets/heatmap/template.html +21 -0
  54. supervisely/app/widgets/modal/__init__.py +0 -0
  55. supervisely/app/widgets/modal/modal.py +198 -0
  56. supervisely/app/widgets/modal/template.html +10 -0
  57. supervisely/app/widgets/object_class_view/object_class_view.py +3 -0
  58. supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
  59. supervisely/app/widgets/radio_tabs/template.html +1 -0
  60. supervisely/app/widgets/select/select.py +6 -3
  61. supervisely/app/widgets/select_class/__init__.py +0 -0
  62. supervisely/app/widgets/select_class/select_class.py +363 -0
  63. supervisely/app/widgets/select_class/template.html +50 -0
  64. supervisely/app/widgets/select_cuda/select_cuda.py +22 -0
  65. supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +65 -7
  66. supervisely/app/widgets/select_tag/__init__.py +0 -0
  67. supervisely/app/widgets/select_tag/select_tag.py +352 -0
  68. supervisely/app/widgets/select_tag/template.html +64 -0
  69. supervisely/app/widgets/select_team/select_team.py +37 -4
  70. supervisely/app/widgets/select_team/template.html +4 -5
  71. supervisely/app/widgets/select_user/__init__.py +0 -0
  72. supervisely/app/widgets/select_user/select_user.py +270 -0
  73. supervisely/app/widgets/select_user/template.html +13 -0
  74. supervisely/app/widgets/select_workspace/select_workspace.py +59 -10
  75. supervisely/app/widgets/select_workspace/template.html +9 -12
  76. supervisely/app/widgets/table/table.py +68 -13
  77. supervisely/app/widgets/tree_select/tree_select.py +2 -0
  78. supervisely/aug/aug.py +6 -2
  79. supervisely/convert/base_converter.py +1 -0
  80. supervisely/convert/converter.py +2 -2
  81. supervisely/convert/image/image_converter.py +3 -1
  82. supervisely/convert/image/image_helper.py +48 -4
  83. supervisely/convert/image/label_studio/label_studio_converter.py +2 -0
  84. supervisely/convert/image/medical2d/medical2d_helper.py +2 -24
  85. supervisely/convert/image/multispectral/multispectral_converter.py +6 -0
  86. supervisely/convert/image/pascal_voc/pascal_voc_converter.py +8 -5
  87. supervisely/convert/image/pascal_voc/pascal_voc_helper.py +7 -0
  88. supervisely/convert/pointcloud/kitti_3d/kitti_3d_converter.py +33 -3
  89. supervisely/convert/pointcloud/kitti_3d/kitti_3d_helper.py +12 -5
  90. supervisely/convert/pointcloud/las/las_converter.py +13 -1
  91. supervisely/convert/pointcloud/las/las_helper.py +110 -11
  92. supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +27 -16
  93. supervisely/convert/pointcloud/pointcloud_converter.py +91 -3
  94. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +58 -22
  95. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +21 -47
  96. supervisely/convert/video/__init__.py +1 -0
  97. supervisely/convert/video/multi_view/__init__.py +0 -0
  98. supervisely/convert/video/multi_view/multi_view.py +543 -0
  99. supervisely/convert/video/sly/sly_video_converter.py +359 -3
  100. supervisely/convert/video/video_converter.py +22 -2
  101. supervisely/convert/volume/dicom/dicom_converter.py +13 -5
  102. supervisely/convert/volume/dicom/dicom_helper.py +30 -18
  103. supervisely/geometry/constants.py +1 -0
  104. supervisely/geometry/geometry.py +4 -0
  105. supervisely/geometry/helpers.py +5 -1
  106. supervisely/geometry/oriented_bbox.py +676 -0
  107. supervisely/geometry/rectangle.py +2 -1
  108. supervisely/io/env.py +76 -1
  109. supervisely/io/fs.py +21 -0
  110. supervisely/nn/benchmark/base_evaluator.py +104 -11
  111. supervisely/nn/benchmark/instance_segmentation/evaluator.py +1 -8
  112. supervisely/nn/benchmark/object_detection/evaluator.py +20 -4
  113. supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +10 -5
  114. supervisely/nn/benchmark/semantic_segmentation/evaluator.py +34 -16
  115. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +1 -1
  116. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +1 -1
  117. supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +1 -1
  118. supervisely/nn/benchmark/visualization/evaluation_result.py +66 -4
  119. supervisely/nn/inference/cache.py +43 -18
  120. supervisely/nn/inference/gui/serving_gui_template.py +5 -2
  121. supervisely/nn/inference/inference.py +795 -199
  122. supervisely/nn/inference/inference_request.py +42 -9
  123. supervisely/nn/inference/predict_app/gui/classes_selector.py +83 -12
  124. supervisely/nn/inference/predict_app/gui/gui.py +676 -488
  125. supervisely/nn/inference/predict_app/gui/input_selector.py +205 -26
  126. supervisely/nn/inference/predict_app/gui/model_selector.py +2 -4
  127. supervisely/nn/inference/predict_app/gui/output_selector.py +46 -6
  128. supervisely/nn/inference/predict_app/gui/settings_selector.py +756 -59
  129. supervisely/nn/inference/predict_app/gui/tags_selector.py +1 -1
  130. supervisely/nn/inference/predict_app/gui/utils.py +236 -119
  131. supervisely/nn/inference/predict_app/predict_app.py +2 -2
  132. supervisely/nn/inference/session.py +43 -35
  133. supervisely/nn/inference/tracking/bbox_tracking.py +113 -34
  134. supervisely/nn/inference/tracking/tracker_interface.py +7 -2
  135. supervisely/nn/inference/uploader.py +139 -12
  136. supervisely/nn/live_training/__init__.py +7 -0
  137. supervisely/nn/live_training/api_server.py +111 -0
  138. supervisely/nn/live_training/artifacts_utils.py +243 -0
  139. supervisely/nn/live_training/checkpoint_utils.py +229 -0
  140. supervisely/nn/live_training/dynamic_sampler.py +44 -0
  141. supervisely/nn/live_training/helpers.py +14 -0
  142. supervisely/nn/live_training/incremental_dataset.py +146 -0
  143. supervisely/nn/live_training/live_training.py +497 -0
  144. supervisely/nn/live_training/loss_plateau_detector.py +111 -0
  145. supervisely/nn/live_training/request_queue.py +52 -0
  146. supervisely/nn/model/model_api.py +9 -0
  147. supervisely/nn/prediction_dto.py +12 -1
  148. supervisely/nn/tracker/base_tracker.py +11 -1
  149. supervisely/nn/tracker/botsort/botsort_config.yaml +0 -1
  150. supervisely/nn/tracker/botsort/tracker/mc_bot_sort.py +7 -4
  151. supervisely/nn/tracker/botsort_tracker.py +94 -65
  152. supervisely/nn/tracker/visualize.py +87 -90
  153. supervisely/nn/training/gui/classes_selector.py +16 -1
  154. supervisely/nn/training/train_app.py +28 -29
  155. supervisely/project/data_version.py +115 -51
  156. supervisely/project/download.py +1 -1
  157. supervisely/project/pointcloud_episode_project.py +37 -8
  158. supervisely/project/pointcloud_project.py +30 -2
  159. supervisely/project/project.py +14 -2
  160. supervisely/project/project_meta.py +27 -1
  161. supervisely/project/project_settings.py +32 -18
  162. supervisely/project/versioning/__init__.py +1 -0
  163. supervisely/project/versioning/common.py +20 -0
  164. supervisely/project/versioning/schema_fields.py +35 -0
  165. supervisely/project/versioning/video_schema.py +221 -0
  166. supervisely/project/versioning/volume_schema.py +87 -0
  167. supervisely/project/video_project.py +717 -15
  168. supervisely/project/volume_project.py +623 -5
  169. supervisely/template/experiment/experiment.html.jinja +4 -4
  170. supervisely/template/experiment/experiment_generator.py +14 -21
  171. supervisely/template/live_training/__init__.py +0 -0
  172. supervisely/template/live_training/header.html.jinja +96 -0
  173. supervisely/template/live_training/live_training.html.jinja +51 -0
  174. supervisely/template/live_training/live_training_generator.py +464 -0
  175. supervisely/template/live_training/sly-style.css +402 -0
  176. supervisely/template/live_training/template.html.jinja +18 -0
  177. supervisely/versions.json +28 -26
  178. supervisely/video/sampling.py +39 -20
  179. supervisely/video/video.py +40 -11
  180. supervisely/video_annotation/video_object.py +29 -4
  181. supervisely/volume/stl_converter.py +2 -0
  182. supervisely/worker_api/agent_rpc.py +24 -1
  183. supervisely/worker_api/rpc_servicer.py +31 -7
  184. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/METADATA +56 -39
  185. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/RECORD +189 -142
  186. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/WHEEL +1 -1
  187. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/entry_points.txt +0 -0
  188. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info/licenses}/LICENSE +0 -0
  189. {supervisely-6.73.452.dist-info → supervisely-6.73.513.dist-info}/top_level.txt +0 -0
supervisely/io/env.py CHANGED
@@ -1,10 +1,14 @@
  # coding: utf-8
  import json
  import os
+ from contextvars import ContextVar, Token
  from typing import Callable, List, Literal, Optional, Union
 
  RAISE_IF_NOT_FOUND = True
-
+ _MULTIUSER_USER_CTX: ContextVar[Optional[Union[int, str]]] = ContextVar(
+     "supervisely_multiuser_app_user_id",
+     default=None,
+ )
 
  def flag_from_env(s: str) -> bool:
      """Returns True if passed string is a flag, False otherwise.
@@ -771,3 +775,74 @@ def add_uploaded_ids_to_env(dataset_id: int, ids: List[int]) -> None:
      else:
          uploaded[str(dataset_id)].extend(ids)
      os.environ["UPLOADED_IDS"] = json.dumps(uploaded)
+
+
+ def is_multiuser_mode_enabled() -> bool:
+     """Returns multiuser app mode flag from environment variable using following keys:
+     - SUPERVISELY_MULTIUSER_APP_MODE
+     :return: multiuser app mode flag
+     :rtype: bool
+     """
+     return _parse_from_env(
+         name="is_multiuser_mode_enabled",
+         keys=["SUPERVISELY_MULTIUSER_APP_MODE"],
+         default=False,
+         raise_not_found=False,
+         postprocess_fn=flag_from_env,
+     )
+
+
+ def enable_multiuser_app_mode() -> None:
+     """
+     Enables multiuser app mode by setting the environment variable.
+     This function can be used to activate multiuser mode in the application allowing
+     separation of user DataJson/StateJson.
+     """
+     os.environ["SUPERVISELY_MULTIUSER_APP_MODE"] = "true"
+
+
+ def disable_multiuser_app_mode() -> None:
+     """Disables multiuser app mode by removing the environment variable."""
+     os.environ.pop("SUPERVISELY_MULTIUSER_APP_MODE", None)
+
+
+ def set_user_for_multiuser_app(user_id: Optional[Union[int, str]]) -> Token:
+     """
+     Sets the user ID for multiuser app mode by setting the environment variable.
+     This function should be used in multiuser mode to separate user DataJson/StateJson.
+
+     :param user_id: The user ID (or session key) to set for the current request.
+     :type user_id: int | str
+     :return: A context token that can be used to reset the user ID later.
+     :rtype: Token
+     :raises RuntimeError: If multiuser app mode is not enabled.
+     """
+     if not is_multiuser_mode_enabled():
+         raise RuntimeError("Multiuser app mode is not enabled. Cannot set user ID.")
+     return _MULTIUSER_USER_CTX.set(user_id)
+
+
+ def reset_user_for_multiuser_app(token: Token) -> None:
+     """
+     Resets the user ID for multiuser app mode using the provided context token.
+
+     :param token: Context token obtained from `set_user_for_multiuser_app`.
+     :type token: Token
+     """
+     if not is_multiuser_mode_enabled():
+         return
+     _MULTIUSER_USER_CTX.reset(token)
+
+
+ def user_from_multiuser_app() -> Optional[Union[int, str]]:
+     """
+     Retrieves the user ID for multiuser app mode from the environment variable.
+
+     :return: The user ID if set, otherwise None.
+     :rtype: Optional[Union[int, str]]
+     """
+     if not is_multiuser_mode_enabled():
+         return None
+     user_id = _MULTIUSER_USER_CTX.get(None)
+     if user_id is not None:
+         return user_id
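
Taken together, these helpers gate a request-scoped user ID behind the SUPERVISELY_MULTIUSER_APP_MODE flag, backed by a ContextVar rather than a global. A minimal usage sketch; the handle_request function below is a hypothetical illustration, since the real per-request wiring lives in the new supervisely/app/fastapi/multi_user.py, which is not shown in this section:

    import supervisely.io.env as sly_env

    sly_env.enable_multiuser_app_mode()  # sets SUPERVISELY_MULTIUSER_APP_MODE=true

    def handle_request(user_id: int):
        # Bind the user to the current request's context; concurrent requests
        # each see their own value because a ContextVar is used, not a global.
        token = sly_env.set_user_for_multiuser_app(user_id)
        try:
            assert sly_env.user_from_multiuser_app() == user_id
            # ... serve the request with user-scoped DataJson/StateJson ...
        finally:
            sly_env.reset_user_for_multiuser_app(token)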
supervisely/io/fs.py CHANGED
@@ -929,6 +929,27 @@ def get_file_hash_chunked(path: str, chunk_size: Optional[int] = 1024 * 1024) -> str:
      return base64.b64encode(digest).decode("utf-8")
 
 
+ async def get_file_hash_chunked_async(path: str, chunk_size: Optional[int] = 1024 * 1024) -> str:
+     """
+     Asynchronously get hash from target file by reading it in chunks.
+
+     :param path: Target file path.
+     :type path: str
+     :param chunk_size: Number of bytes to read per iteration. Default is 1 MB.
+     :type chunk_size: int, optional
+     :returns: File hash as a base64 encoded string.
+     :rtype: str
+     """
+     hash_sha256 = hashlib.sha256()
+     async with aiofiles.open(path, "rb") as file:
+         while True:
+             chunk = await file.read(chunk_size)
+             if not chunk:
+                 break
+             hash_sha256.update(chunk)
+     return base64.b64encode(hash_sha256.digest()).decode("utf-8")
+
+
  def tree(dir_path: str) -> str:
      """
      Get tree for target directory.
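
A short usage sketch for the new async helper; the file path is an assumption, any local file works:

    import asyncio

    from supervisely.io.fs import get_file_hash_chunked_async

    async def main():
        # Hashes the file in 1 MB chunks without blocking the event loop.
        print(await get_file_hash_chunked_async("./weights.pt"))

    asyncio.run(main())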
supervisely/nn/benchmark/base_evaluator.py CHANGED
@@ -1,13 +1,19 @@
  from __future__ import annotations
 
+ import json
  import os
  import pickle
- from typing import Dict, List, Optional, Union
+ import zipfile
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Union
 
+ import numpy as np
+ import pandas as pd
  import yaml
 
  from supervisely.app.widgets import SlyTqdm
- from supervisely.io.fs import get_file_name_with_ext
+ from supervisely.io.fs import get_file_name_with_ext, silent_remove
+ from supervisely.io.json import dump_json_file, load_json_file
  from supervisely.task.progress import tqdm_sly
 
 
@@ -17,8 +23,8 @@ class BaseEvalResult:
      def __init__(self, directory: Optional[str] = None):
          self.directory = directory
          self.inference_info: Dict = {}
-         self.speedtest_info: Dict = None
-         self.eval_data: Dict = None
+         self.speedtest_info: Optional[Dict] = None
+         self.eval_data: Optional[Dict] = None
          self.mp = None
 
          if self.directory is not None:
@@ -44,19 +50,19 @@ class BaseEvalResult:
          )
 
      @property
-     def short_name(self) -> str:
+     def short_name(self) -> Union[str, None]:
          if not self.name:
-             return
+             return None
          if len(self.name) > 20:
              return self.name[:9] + "..." + self.name[-7:]
          return self.name
 
      @property
-     def gt_project_id(self) -> int:
+     def gt_project_id(self) -> Optional[int]:
          return self.inference_info.get("gt_project_id")
 
      @property
-     def gt_dataset_ids(self) -> List[int]:
+     def gt_dataset_ids(self) -> Optional[List[int]]:
          return self.inference_info.get("gt_dataset_ids", None)
 
      @property
@@ -112,9 +118,49 @@ class BaseEvalResult:
          name = self.inference_info.get("checkpoint_name", "")
          return name
 
+     def _load_eval_data_archive(self, path: Path, pd_index_col=False) -> Dict:
+         """Load eval_data from archive"""
+         with zipfile.ZipFile(path, mode="r") as zf:
+             with zf.open("eval_data.json") as json_f:
+                 data = json.load(json_f)
+             return self._process_value_from_archive(data, zf, pd_index_col)
+
+     def _process_value_from_archive(self, value, zf: zipfile.ZipFile, pd_index_col: bool = False):
+         """Recursively process values from archive, handling nested dicts and lists."""
+         if isinstance(value, str) and value.endswith(".npy"):
+             with zf.open(value) as arr_f:
+                 return np.load(arr_f)
+         elif isinstance(value, str) and value.endswith(".csv"):
+             with zf.open(value) as df_f:
+                 if pd_index_col:
+                     return pd.read_csv(df_f, sep="\t", index_col=0)
+                 return pd.read_csv(df_f, sep="\t")
+         elif isinstance(value, dict):
+             res = {}
+             for k, v in value.items():
+                 k = int(k) if isinstance(k, str) and k.isdigit() else k
+                 k = float(k) if isinstance(k, str) and self._is_float(k) else k
+                 res[k] = self._process_value_from_archive(v, zf, pd_index_col)
+             return res
+         elif isinstance(value, list):
+             return [self._process_value_from_archive(item, zf, pd_index_col) for item in value]
+         elif isinstance(value, str) and value.isdigit():
+             return int(value)
+         else:
+             return value
+
+     def _is_float(self, s: str) -> bool:
+         if not s or not isinstance(s, str):
+             return False
+         try:
+             float(s)
+             return "." in s or "e" in s.lower()
+         except (ValueError, AttributeError):
+             return False
+
 
  class BaseEvaluator:
-     EVALUATION_PARAMS_YAML_PATH: str = None
+     EVALUATION_PARAMS_YAML_PATH: Optional[str] = None
      eval_result_cls = BaseEvalResult
 
      def __init__(
@@ -127,13 +173,14 @@ class BaseEvaluator:
          classes_whitelist: Optional[List[str]] = None,
          evaluation_params: Optional[dict] = None,
      ):
+         self.eval_data: Optional[Dict] = None
          self.gt_project_path = gt_project_path
          self.pred_project_path = pred_project_path
          self.result_dir = result_dir
          self.total_items = items_count
          self.pbar = progress or tqdm_sly
          os.makedirs(result_dir, exist_ok=True)
-         self.classes_whitelist = classes_whitelist
+         self.classes_whitelist = classes_whitelist or []
 
          if evaluation_params is None:
              evaluation_params = self._get_default_evaluation_params()
@@ -160,11 +207,57 @@ class BaseEvaluator:
          if cls.EVALUATION_PARAMS_YAML_PATH is None:
              return {}
          else:
-             return yaml.safe_load(cls.load_yaml_evaluation_params())
+             params = cls.load_yaml_evaluation_params()
+             if params is None:
+                 return {}
+             return yaml.safe_load(params)
 
      def _dump_pickle(self, data, file_path):
          with open(file_path, "wb") as f:
              pickle.dump(data, f)
 
+     def _process_value_for_archive(self, value: Any, key_prefix: str, zf: zipfile.ZipFile) -> Any:
+         """Recursively process values for archiving, handling nested dicts and lists."""
+         if isinstance(value, np.ndarray):
+             filename = f"{key_prefix}.npy" if key_prefix else "array.npy"
+             filepath = os.path.join(self.result_dir, filename)
+             np.save(filepath, value)
+             zf.write(filepath, arcname=filename)
+             os.remove(filepath)
+             return filename
+         elif isinstance(value, pd.DataFrame):
+             filename = f"{key_prefix}.csv" if key_prefix else "dataframe.csv"
+             filepath = os.path.join(self.result_dir, filename)
+             value.to_csv(filepath, sep="\t")
+             zf.write(filepath, arcname=filename)
+             os.remove(filepath)
+             return filename
+         elif isinstance(value, dict):
+             return {
+                 k: self._process_value_for_archive(v, f"{key_prefix}.{k}" if key_prefix else k, zf)
+                 for k, v in value.items()
+             }
+         elif isinstance(value, list):
+             return [
+                 self._process_value_for_archive(item, f"{key_prefix}[{i}]", zf)
+                 for i, item in enumerate(value)
+             ]
+         elif isinstance(value, (np.integer, np.floating)):
+             return value.item()
+         elif isinstance(value, np.bool_):
+             return bool(value)
+         elif isinstance(value, str) and value.isdigit():
+             return int(value)
+         else:
+             return value
+
+     def _dump_eval_results_archive(self):
+         with zipfile.ZipFile(os.path.join(self.result_dir, "eval_data.zip"), mode="w") as zf:
+             data = self._process_value_for_archive(self.eval_data, "", zf)
+             filepath = os.path.join(self.result_dir, "eval_data.json")
+             dump_json_file(data, filepath, indent=4)
+             zf.write(filepath, arcname="eval_data.json")
+             silent_remove(filepath)
+
      def get_eval_result(self) -> BaseEvalResult:
          return self.eval_result_cls(self.result_dir)
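
The net effect of these two new private methods is a portable eval_data.zip format: a JSON manifest (eval_data.json) whose string values ending in .npy or .csv name sibling members of the same archive. A minimal reader sketch under that assumption, handling only a flat top-level dict; the nested dict/list and key-coercion logic lives in _process_value_from_archive above:

    import io
    import json
    import zipfile

    import numpy as np
    import pandas as pd

    with zipfile.ZipFile("eval_data.zip") as zf:
        manifest = json.loads(zf.read("eval_data.json"))
        for key, value in manifest.items():
            # Strings ending in .npy/.csv are references to archive members.
            if isinstance(value, str) and value.endswith(".npy"):
                manifest[key] = np.load(io.BytesIO(zf.read(value)))
            elif isinstance(value, str) and value.endswith(".csv"):
                manifest[key] = pd.read_csv(io.BytesIO(zf.read(value)), sep="\t")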
supervisely/nn/benchmark/instance_segmentation/evaluator.py CHANGED
@@ -1,6 +1,5 @@
  from __future__ import annotations
 
- import os
  from pathlib import Path
 
  from supervisely.io.json import dump_json_file
@@ -57,16 +56,10 @@ class InstanceSegmentationEvaluator(ObjectDetectionEvaluator):
          self._dump_eval_results()
 
      def _dump_eval_results(self):
+         self._dump_eval_results_archive()
          _, _, eval_data_path = self._get_eval_paths()
          self._dump_pickle(self.eval_data, eval_data_path)
 
-     def _get_eval_paths(self):
-         base_dir = self.result_dir
-         cocoGt_path = os.path.join(base_dir, "cocoGt.json")
-         cocoDt_path = os.path.join(base_dir, "cocoDt.json")
-         eval_data_path = os.path.join(base_dir, "eval_data.pkl")
-         return cocoGt_path, cocoDt_path, eval_data_path
-
      def _dump_datasets(self):
          cocoGt_path, cocoDt_path, _ = self._get_eval_paths()
          dump_json_file(self.cocoGt_json, cocoGt_path, indent=None)
supervisely/nn/benchmark/object_detection/evaluator.py CHANGED
@@ -15,6 +15,7 @@ from supervisely.nn.benchmark.utils import (
      sly2coco,
  )
  from supervisely.nn.benchmark.visualization.vis_click_data import ClickData, IdMapper
+ from supervisely.sly_logger import logger
 
 
  class ObjectDetectionEvalResult(BaseEvalResult):
@@ -30,10 +31,24 @@ class ObjectDetectionEvalResult(BaseEvalResult):
          if self.coco_gt.exists() and self.coco_dt.exists():
              self.coco_gt, self.coco_dt = read_coco_datasets(self.coco_gt, self.coco_dt)
 
-         eval_data_path = Path(path) / "eval_data.pkl"
-         if eval_data_path.exists():
-             with open(Path(path, "eval_data.pkl"), "rb") as f:
-                 self.eval_data = pickle.load(f)
+         eval_data_pickle_path = Path(path) / "eval_data.pkl"
+         eval_data_archive_path = Path(path) / "eval_data.zip"
+         if eval_data_pickle_path.exists():
+             try:
+                 with open(Path(path, "eval_data.pkl"), "rb") as f:
+                     self.eval_data = pickle.load(f)
+             except Exception as e:
+                 logger.warning(f"Failed to load eval_data.pkl: {e}", exc_info=True)
+                 self.eval_data = None
+         if self.eval_data is None and eval_data_archive_path.exists():
+             try:
+                 self.eval_data = self._load_eval_data_archive(eval_data_archive_path)
+             except Exception as e:
+                 logger.warning(f"Failed to load eval_data from archive: {e}", exc_info=True)
+                 self.eval_data = None
+
+         if self.eval_data is None:
+             raise ValueError("Failed to load eval_data. Please contact support.")
 
          inference_info_path = Path(path) / "inference_info.json"
          if inference_info_path.exists():
@@ -172,6 +187,7 @@ class ObjectDetectionEvaluator(BaseEvaluator):
          cocoGt_path, cocoDt_path, eval_data_path = self._get_eval_paths()
          dump_json_file(self.cocoGt_json, cocoGt_path, indent=None)
          dump_json_file(self.cocoDt_json, cocoDt_path, indent=None)
+         self._dump_eval_results_archive()
          self._dump_pickle(self.eval_data, eval_data_path)
 
      def _get_eval_paths(self):
supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py CHANGED
@@ -19,16 +19,21 @@ class PRCurve(DetectionVisMetric):
 
      @property
      def md(self) -> MarkdownWidget:
-         text = self.vis_texts.markdown_pr_curve.format(self.vis_texts.definitions.about_pr_tradeoffs)
+         text = self.vis_texts.markdown_pr_curve.format(
+             self.vis_texts.definitions.about_pr_tradeoffs
+         )
          return MarkdownWidget(self.MARKDOWN, "Precision-Recall Curve", text)
 
      @property
      def notification(self) -> NotificationWidget:
          title, _ = self.vis_texts.notification_ap.values()
-         return NotificationWidget(
-             self.NOTIFICATION,
-             title.format(self.eval_result.mp.base_metrics()["mAP"].round(2)),
-         )
+         _map = self.eval_result.mp.base_metrics()["mAP"]
+         if isinstance(_map, float):
+             _map = round(_map, 2)
+         elif isinstance(_map, np.ndarray):
+             _map = float(np.round(_map, 2))
+
+         return NotificationWidget(self.NOTIFICATION, title.format(_map))
 
      @property
      def chart(self) -> ChartWidget:
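
The notification change exists because mAP may arrive as either a plain Python float, which has no .round() method, or a NumPy array. A standalone illustration of the branch; the values are made up:

    import numpy as np

    for _map in (0.7346, np.array(0.7346)):
        if isinstance(_map, float):
            _map = round(_map, 2)  # plain float: _map.round(2) would raise AttributeError
        elif isinstance(_map, np.ndarray):
            _map = float(np.round(_map, 2))
        print(_map)  # 0.73 in both cases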
supervisely/nn/benchmark/semantic_segmentation/evaluator.py CHANGED
@@ -4,11 +4,10 @@ import os
  import pickle
  import shutil
  from pathlib import Path
- from typing import List
+ from typing import List, Optional
 
  import cv2
  import numpy as np
- from tqdm import tqdm
 
  from supervisely.io.json import load_json_file
  from supervisely.nn.benchmark.base_evaluator import BaseEvalResult, BaseEvaluator
@@ -30,10 +29,25 @@ class SemanticSegmentationEvalResult(BaseEvalResult):
      def _read_files(self, path: str) -> None:
          """Read all necessary files from the directory"""
 
-         eval_data_path = Path(path) / "eval_data.pkl"
-         if eval_data_path.exists():
-             with open(Path(path, "eval_data.pkl"), "rb") as f:
-                 self.eval_data = pickle.load(f)
+         eval_data_pickle_path = Path(path) / "eval_data.pkl"
+         eval_data_archive_path = Path(path) / "eval_data.zip"
+         if eval_data_pickle_path.exists():
+             try:
+                 with open(Path(path, "eval_data.pkl"), "rb") as f:
+                     self.eval_data = pickle.load(f)
+             except Exception as e:
+                 logger.warning(f"Failed to load eval_data.pkl: {e}", exc_info=True)
+                 self.eval_data = None
+
+         if self.eval_data is None and eval_data_archive_path.exists():
+             try:
+                 self.eval_data = self._load_eval_data_archive(eval_data_archive_path, pd_index_col=True)
+             except Exception as e:
+                 logger.warning(f"Failed to load eval_data from archive: {e}", exc_info=True)
+                 self.eval_data = None
+
+         if self.eval_data is None:
+             raise ValueError("Failed to load eval_data. Please contact support.")
 
          inference_info_path = Path(path) / "inference_info.json"
          if inference_info_path.exists():
@@ -69,12 +83,12 @@ class SemanticSegmentationEvaluator(BaseEvaluator):
 
      def __init__(self, *args, **kwargs):
          super().__init__(*args, **kwargs)
-         self.bg_cls_name = None
-         self.bg_cls_color = None
+         self.bg_cls_name: Optional[str] = None
+         self.bg_cls_color: Optional[List[int]] = None
 
      def evaluate(self):
          self.bg_cls_name = self._get_bg_class_name()
-         if self.bg_cls_name not in self.classes_whitelist:
+         if self.bg_cls_name not in self.classes_whitelist and self.bg_cls_name is not None:
              self.classes_whitelist.append(self.bg_cls_name)
 
          gt_prep_path, pred_prep_path = self.prepare_segmentation_data()
@@ -94,9 +108,9 @@ class SemanticSegmentationEvaluator(BaseEvaluator):
          self._dump_eval_results()
          logger.info("Evaluation results are saved")
 
-     def _get_palette(self, project_path):
+     def _get_palette(self, project_path) -> List[List[int]]:
          meta_path = Path(project_path) / "meta.json"
-         meta = ProjectMeta.from_json(load_json_file(meta_path))
+         meta = ProjectMeta.from_json(load_json_file(str(meta_path)))
 
          palette = []
          for cls_name in self.classes_whitelist:
@@ -109,8 +123,9 @@ class SemanticSegmentationEvaluator(BaseEvaluator):
          return palette
 
      def _dump_eval_results(self):
+         self._dump_eval_results_archive()
          eval_data_path = self._get_eval_data_path()
-         self._dump_pickle(self.eval_data, eval_data_path)  # TODO: maybe dump JSON?
+         self._dump_pickle(self.eval_data, eval_data_path)
 
      def _get_eval_data_path(self):
          base_dir = self.result_dir
@@ -130,11 +145,14 @@ class SemanticSegmentationEvaluator(BaseEvaluator):
                  continue
 
              palette = self._get_palette(src_dir)
-             bg_cls_idx = self.classes_whitelist.index(self.bg_cls_name)
+             if self.bg_cls_name is not None:
+                 bg_cls_idx = self.classes_whitelist.index(self.bg_cls_name)
+             else:
+                 raise ValueError("Background class name is not set.")
              try:
                  bg_color = palette[bg_cls_idx]
              except IndexError:
-                 bg_color = (0, 0, 0)
+                 bg_color = [0, 0, 0]
              output_dir.mkdir(parents=True)
              temp_seg_dir = src_dir + "_temp"
              if not os.path.exists(temp_seg_dir):
@@ -174,14 +192,14 @@ class SemanticSegmentationEvaluator(BaseEvaluator):
 
          return output_dirs
 
-     def _get_bg_class_name(self):
+     def _get_bg_class_name(self) -> Optional[str]:
          possible_names = ["background", "bg", "unlabeled", "neutral", "__bg__"]
          logger.info(f"Searching for background class in projects. Possible names: {possible_names}")
 
          bg_cls_names = []
          for project_path in [self.gt_project_path, self.pred_project_path]:
              meta_path = Path(project_path) / "meta.json"
-             meta = ProjectMeta.from_json(load_json_file(meta_path))
+             meta = ProjectMeta.from_json(load_json_file(str(meta_path)))
 
              for obj_cls in meta.obj_classes:
                  if obj_cls.name in possible_names:
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py CHANGED
@@ -82,7 +82,7 @@ class ConfusionMatrix(SemanticSegmVisMetric):
              res["clickData"][key]["imagesIds"] = []
 
              cmat_key = str(ig) + "_" + str(ip)
-             for name in self.eval_result.mp.cmat_cell_img_names[cmat_key]:
+             for name in self.eval_result.mp.cmat_cell_img_names.get(cmat_key, []):
                  gt_img_id = self.eval_result.images_map[name]
                  pred_img_id = self.eval_result.matched_pair_data[gt_img_id].pred_image_info.id
                  res["clickData"][key]["imagesIds"].append(pred_img_id)
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py CHANGED
@@ -91,7 +91,7 @@ class FrequentlyConfused(SemanticSegmVisMetric):
              res["clickData"][key] = {}
              res["clickData"][key]["imagesIds"] = []
              idx_key = str(gt_idx) + "_" + str(pred_idx)
-             for name in self.eval_result.mp.cmat_cell_img_names[idx_key]:
+             for name in self.eval_result.mp.cmat_cell_img_names.get(idx_key, []):
                  gt_img_id = self.eval_result.images_map[name]
                  pred_img_id = self.eval_result.matched_pair_data[gt_img_id].pred_image_info.id
                  res["clickData"][key]["imagesIds"].append(pred_img_id)
supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py CHANGED
@@ -22,7 +22,7 @@ class Overview(SemanticSegmVisMetric):
          url = self.eval_result.inference_info.get("checkpoint_url")
          link_text = self.eval_result.inference_info.get("custom_checkpoint_path")
          if link_text is None:
-             link_text = url
+             link_text = url or ""
          link_text = link_text.replace("_", "\_")
 
          model_name = self.eval_result.inference_info.get("model_name") or "Custom"
supervisely/nn/benchmark/visualization/evaluation_result.py CHANGED
@@ -1,13 +1,16 @@
+ import json
  import pickle
+ import zipfile
  from pathlib import Path
  from typing import Dict, List, Optional
 
+ import numpy as np
  import pandas as pd
 
  from supervisely.annotation.annotation import ProjectMeta
  from supervisely.api.api import Api
- from supervisely.api.module_api import ApiField
  from supervisely.api.dataset_api import DatasetInfo
+ from supervisely.api.module_api import ApiField
  from supervisely.api.project_api import ProjectInfo
  from supervisely.app.widgets import SlyTqdm
  from supervisely.io.env import team_id
@@ -197,6 +200,49 @@ class EvalResult:
          #     progress_cb=pbar.update,
          # )
 
+     def _load_eval_data_archive(self, path: Path, pd_index_col: bool = False) -> Dict:
+         """Load eval_data from archive"""
+         with zipfile.ZipFile(path, mode="r") as zf:
+             with zf.open("eval_data.json") as json_f:
+                 data = json.load(json_f)
+             return self._process_value_from_archive(data, zf, pd_index_col=pd_index_col)
+
+     def _process_value_from_archive(self, value, zf: zipfile.ZipFile, pd_index_col: bool = False):
+         """Recursively process values from archive, handling nested dicts and lists."""
+         if isinstance(value, str) and value.endswith(".npy"):
+             with zf.open(value) as arr_f:
+                 return np.load(arr_f)
+         elif isinstance(value, str) and value.endswith(".csv"):
+             with zf.open(value) as df_f:
+                 if pd_index_col:
+                     return pd.read_csv(df_f, sep="\t", index_col=0)
+                 return pd.read_csv(df_f, sep="\t")
+         elif isinstance(value, dict):
+             res = {}
+             for k, v in value.items():
+                 k = int(k) if isinstance(k, str) and k.isdigit() else k
+                 k = float(k) if isinstance(k, str) and self._is_float(k) else k
+                 res[k] = self._process_value_from_archive(v, zf, pd_index_col=pd_index_col)
+             return res
+         elif isinstance(value, list):
+             return [
+                 self._process_value_from_archive(item, zf, pd_index_col=pd_index_col)
+                 for item in value
+             ]
+         elif isinstance(value, str) and value.isdigit():
+             return int(value)
+         else:
+             return value
+
+     def _is_float(self, s: str) -> bool:
+         if not s or not isinstance(s, str):
+             return False
+         try:
+             float(s)
+             return "." in s or "e" in s.lower()
+         except (ValueError, AttributeError):
+             return False
+
      def _read_eval_data(self):
          from pycocotools.coco import COCO  # pylint: disable=import-error
 
@@ -205,9 +251,25 @@ class EvalResult:
          coco_gt, coco_dt = COCO(gt_path), COCO(dt_path)
          self.coco_gt = coco_gt
          self.coco_dt = coco_dt
-         self.eval_data = pickle.load(
-             open(Path(self.local_dir, "evaluation", "eval_data.pkl"), "rb")
-         )
+         eval_data_pickle_path = Path(self.local_dir, "evaluation", "eval_data.pkl")
+         eval_data_archive_path = Path(self.local_dir, "evaluation", "eval_data.zip")
+         if eval_data_pickle_path.exists():
+             try:
+                 with open(eval_data_pickle_path, "rb") as f:
+                     self.eval_data = pickle.load(f)
+             except Exception as e:
+                 logger.warning(f"Failed to load eval_data.pkl: {e}", exc_info=True)
+                 self.eval_data = None
+         if self.eval_data is None and eval_data_archive_path.exists():
+             try:
+                 self.eval_data = self._load_eval_data_archive(eval_data_archive_path)
+             except Exception as e:
+                 logger.warning(f"Failed to load eval_data from archive: {e}", exc_info=True)
+                 self.eval_data = None
+
+         if self.eval_data is None:
+             raise ValueError("Failed to load eval_data. Please contact support.")
+
          self.inference_info = load_json_file(
              Path(self.local_dir, "evaluation", "inference_info.json")