supervisely-6.73.444-py3-none-any.whl → supervisely-6.73.468-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of supervisely might be problematic.

Files changed (68)
  1. supervisely/__init__.py +24 -1
  2. supervisely/_utils.py +81 -0
  3. supervisely/annotation/json_geometries_map.py +2 -0
  4. supervisely/api/dataset_api.py +74 -12
  5. supervisely/api/entity_annotation/figure_api.py +8 -5
  6. supervisely/api/image_api.py +4 -0
  7. supervisely/api/video/video_annotation_api.py +4 -2
  8. supervisely/api/video/video_api.py +41 -1
  9. supervisely/app/__init__.py +1 -1
  10. supervisely/app/content.py +14 -6
  11. supervisely/app/fastapi/__init__.py +1 -0
  12. supervisely/app/fastapi/custom_static_files.py +1 -1
  13. supervisely/app/fastapi/multi_user.py +88 -0
  14. supervisely/app/fastapi/subapp.py +88 -42
  15. supervisely/app/fastapi/websocket.py +77 -9
  16. supervisely/app/singleton.py +21 -0
  17. supervisely/app/v1/app_service.py +18 -2
  18. supervisely/app/v1/constants.py +7 -1
  19. supervisely/app/widgets/card/card.py +20 -0
  20. supervisely/app/widgets/deploy_model/deploy_model.py +56 -35
  21. supervisely/app/widgets/dialog/dialog.py +12 -0
  22. supervisely/app/widgets/dialog/template.html +2 -1
  23. supervisely/app/widgets/experiment_selector/experiment_selector.py +8 -0
  24. supervisely/app/widgets/fast_table/fast_table.py +121 -31
  25. supervisely/app/widgets/fast_table/template.html +1 -1
  26. supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
  27. supervisely/app/widgets/radio_tabs/template.html +1 -0
  28. supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +65 -7
  29. supervisely/app/widgets/table/table.py +68 -13
  30. supervisely/app/widgets/tree_select/tree_select.py +2 -0
  31. supervisely/convert/image/csv/csv_converter.py +24 -15
  32. supervisely/convert/video/video_converter.py +2 -2
  33. supervisely/geometry/polyline_3d.py +110 -0
  34. supervisely/io/env.py +76 -1
  35. supervisely/nn/inference/cache.py +37 -17
  36. supervisely/nn/inference/inference.py +667 -114
  37. supervisely/nn/inference/inference_request.py +15 -8
  38. supervisely/nn/inference/predict_app/gui/classes_selector.py +81 -12
  39. supervisely/nn/inference/predict_app/gui/gui.py +676 -488
  40. supervisely/nn/inference/predict_app/gui/input_selector.py +205 -26
  41. supervisely/nn/inference/predict_app/gui/model_selector.py +2 -4
  42. supervisely/nn/inference/predict_app/gui/output_selector.py +46 -6
  43. supervisely/nn/inference/predict_app/gui/settings_selector.py +756 -59
  44. supervisely/nn/inference/predict_app/gui/tags_selector.py +1 -1
  45. supervisely/nn/inference/predict_app/gui/utils.py +236 -119
  46. supervisely/nn/inference/predict_app/predict_app.py +2 -2
  47. supervisely/nn/inference/session.py +43 -35
  48. supervisely/nn/model/model_api.py +9 -0
  49. supervisely/nn/model/prediction_session.py +8 -7
  50. supervisely/nn/prediction_dto.py +7 -0
  51. supervisely/nn/tracker/base_tracker.py +11 -1
  52. supervisely/nn/tracker/botsort/botsort_config.yaml +0 -1
  53. supervisely/nn/tracker/botsort_tracker.py +14 -7
  54. supervisely/nn/tracker/visualize.py +70 -72
  55. supervisely/nn/training/gui/train_val_splits_selector.py +52 -31
  56. supervisely/nn/training/train_app.py +10 -5
  57. supervisely/project/project.py +9 -1
  58. supervisely/video/sampling.py +39 -20
  59. supervisely/video/video.py +41 -12
  60. supervisely/volume/stl_converter.py +2 -0
  61. supervisely/worker_api/agent_rpc.py +24 -1
  62. supervisely/worker_api/rpc_servicer.py +31 -7
  63. {supervisely-6.73.444.dist-info → supervisely-6.73.468.dist-info}/METADATA +14 -11
  64. {supervisely-6.73.444.dist-info → supervisely-6.73.468.dist-info}/RECORD +68 -66
  65. {supervisely-6.73.444.dist-info → supervisely-6.73.468.dist-info}/LICENSE +0 -0
  66. {supervisely-6.73.444.dist-info → supervisely-6.73.468.dist-info}/WHEEL +0 -0
  67. {supervisely-6.73.444.dist-info → supervisely-6.73.468.dist-info}/entry_points.txt +0 -0
  68. {supervisely-6.73.444.dist-info → supervisely-6.73.468.dist-info}/top_level.txt +0 -0
supervisely/app/widgets/table/table.py CHANGED
@@ -1,3 +1,5 @@
+# isort: skip_file
+
 import copy
 import io
 
@@ -54,9 +56,8 @@ class PackerUnpacker:
 
     @staticmethod
     def pandas_unpacker(data: pd.DataFrame):
-        data = data.replace({np.nan: None})
-        # data = data.astype(object).replace(np.nan, "-") # TODO: replace None later
-
+        # Keep None/NaN values in source data, don't replace them
+        # They will be converted to "" only when sending to frontend
         unpacked_data = {
             "columns": data.columns.to_list(),
             "data": data.values.tolist(),
@@ -169,9 +170,35 @@ class Table(Widget):
 
         super().__init__(widget_id=widget_id, file_path=__file__)
 
+    def _prepare_data_for_frontend(self, data_dict):
+        """Convert None and NaN values to empty strings for frontend display.
+        This preserves the original None/NaN values in _parsed_data.
+        """
+        import math
+
+        display_data = copy.deepcopy(data_dict)
+
+        # Convert None/NaN in data rows
+        for row in display_data.get("data", []):
+            for i in range(len(row)):
+                value = row[i]
+                # Check for None or NaN (NaN is a float that doesn't equal itself)
+                if value is None or (isinstance(value, float) and math.isnan(value)):
+                    row[i] = ""
+
+        # Convert None/NaN in summary row if present
+        if "summaryRow" in display_data and display_data["summaryRow"] is not None:
+            summary_row = display_data["summaryRow"]
+            for i in range(len(summary_row)):
+                value = summary_row[i]
+                if value is None or (isinstance(value, float) and math.isnan(value)):
+                    summary_row[i] = ""
+
+        return display_data
+
     def get_json_data(self):
         return {
-            "table_data": self._parsed_data,
+            "table_data": self._prepare_data_for_frontend(self._parsed_data),
             "table_options": {
                 "perPage": self._per_page,
                 "pageSizes": self._page_sizes,
@@ -255,13 +282,17 @@ class Table(Widget):
 
     def read_json(self, value: dict) -> None:
         self._update_table_data(input_data=value)
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
         self.clear_selection()
 
     def read_pandas(self, value: pd.DataFrame) -> None:
         self._update_table_data(input_data=value)
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
         self.clear_selection()
 
@@ -272,7 +303,9 @@ class Table(Widget):
         index = len(table_data) if index > len(table_data) or index < 0 else index
 
         self._parsed_data["data"].insert(index, data)
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
 
     def pop_row(self, index=-1):
@@ -284,7 +317,9 @@ class Table(Widget):
 
         if len(self._parsed_data["data"]) != 0:
             popped_row = self._parsed_data["data"].pop(index)
-            DataJson()[self.widget_id]["table_data"] = self._parsed_data
+            DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+                self._parsed_data
+            )
             DataJson().send_changes()
             return popped_row
 
@@ -382,11 +417,27 @@ class Table(Widget):
         StateJson()[self.widget_id]["selected_row"] = {}
         StateJson().send_changes()
 
+    @staticmethod
+    def _values_equal(val1, val2):
+        """Compare two values, handling NaN specially."""
+        import math
+
+        # Check if both are NaN
+        is_nan1 = isinstance(val1, float) and math.isnan(val1)
+        is_nan2 = isinstance(val2, float) and math.isnan(val2)
+        if is_nan1 and is_nan2:
+            return True
+        # Check if both are None
+        if val1 is None and val2 is None:
+            return True
+        # Regular comparison
+        return val1 == val2
+
     def delete_row(self, key_column_name, key_cell_value):
         col_index = self._parsed_data["columns"].index(key_column_name)
         row_indices = []
         for idx, row in enumerate(self._parsed_data["data"]):
-            if row[col_index] == key_cell_value:
+            if self._values_equal(row[col_index], key_cell_value):
                 row_indices.append(idx)
         if len(row_indices) == 0:
             raise ValueError('Column "{key_column_name}" does not have value "{key_cell_value}"')
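Note: the helper exists because NaN never compares equal to itself, so the old plain `==` check could not match NaN-valued key cells. A quick illustration (values are hypothetical):

    from supervisely.app.widgets import Table

    nan = float("nan")
    nan == nan                       # False, which is why delete_row used to miss NaN cells
    Table._values_equal(nan, nan)    # True
    Table._values_equal(None, None)  # True
    Table._values_equal(1, 1.0)      # True (falls back to ==)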
@@ -400,7 +451,7 @@ class Table(Widget):
         key_col_index = self._parsed_data["columns"].index(key_column_name)
         row_indices = []
         for idx, row in enumerate(self._parsed_data["data"]):
-            if row[key_col_index] == key_cell_value:
+            if self._values_equal(row[key_col_index], key_cell_value):
                 row_indices.append(idx)
         if len(row_indices) == 0:
             raise ValueError('Column "{key_column_name}" does not have value "{key_cell_value}"')
@@ -411,20 +462,24 @@ class Table(Widget):
 
         col_index = self._parsed_data["columns"].index(column_name)
         self._parsed_data["data"][row_indices[0]][col_index] = new_value
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
 
     def update_matching_cells(self, key_column_name, key_cell_value, column_name, new_value):
         key_col_index = self._parsed_data["columns"].index(key_column_name)
         row_indices = []
         for idx, row in enumerate(self._parsed_data["data"]):
-            if row[key_col_index] == key_cell_value:
+            if self._values_equal(row[key_col_index], key_cell_value):
                 row_indices.append(idx)
 
         col_index = self._parsed_data["columns"].index(column_name)
         for row_idx in row_indices:
             self._parsed_data["data"][row_idx][col_index] = new_value
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
 
     def sort(self, column_id: int = None, direction: Optional[Literal["asc", "desc"]] = None):
supervisely/app/widgets/tree_select/tree_select.py CHANGED
@@ -264,6 +264,8 @@ class TreeSelect(Widget):
 
     def _get_all_items(items: List[TreeSelect.Item]) -> List[TreeSelect.Item]:
         res = []
+        if not items:
+            return res
         for item in items:
             res.append(item)
             res.extend(_get_all_items(item.children))
supervisely/convert/image/csv/csv_converter.py CHANGED
@@ -24,6 +24,7 @@ from supervisely.io.fs import (
     get_file_name_with_ext,
     list_files_recursively,
 )
+from supervisely.io.env import team_id
 from supervisely.io.json import load_json_file
 from supervisely.project.project_settings import LabelingInterface
 
@@ -78,16 +79,16 @@ class CSVConverter(ImageConverter):
     }
 
     def __init__(
-        self,
-        input_data: str,
-        labeling_interface: Optional[Union[LabelingInterface, str]],
-        upload_as_links: bool,
-        remote_files_map: Optional[Dict[str, str]] = None,
+        self,
+        input_data: str,
+        labeling_interface: Optional[Union[LabelingInterface, str]],
+        upload_as_links: bool,
+        remote_files_map: Optional[Dict[str, str]] = None,
     ):
         super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)
 
+        self._supports_links = True
         self._csv_reader = None
-        self._team_id = None
 
     def __str__(self):
         return AvailableImageConverters.CSV
@@ -121,6 +122,12 @@ class CSVConverter(ImageConverter):
 
         full_path = valid_files[0]
 
+        if self.upload_as_links and self._supports_links:
+            for local_path, remote_path in self._remote_files_map.items():
+                if local_path.endswith(full_path):
+                    self._api.storage.download(self._team_id, remote_path, local_path)
+                    break
+
         file_ext = get_file_ext(full_path)
         if file_ext in self.conversion_functions:
             csv_full_path = os.path.splitext(full_path)[0] + ".csv"
@@ -147,7 +154,7 @@ class CSVConverter(ImageConverter):
                 team_files = False
                 break
         if item_path is None:
-            logger.warn(f"Failed to find image path in row: {row}. Skipping.")
+            logger.warning(f"Failed to find image path in row: {row}. Skipping.")
             continue
         ann_data = row.get("tag")
         item = CSVConverter.Item(
@@ -192,7 +199,7 @@ class CSVConverter(ImageConverter):
             ann_json = csv_helper.rename_in_json(ann_json, renamed_classes, renamed_tags)
             return Annotation.from_json(ann_json, meta)
         except Exception as e:
-            logger.warn(f"Failed to convert annotation: {repr(e)}")
+            logger.warning(f"Failed to convert annotation: {repr(e)}")
             return item.create_empty_annotation()
 
     def process_remote_image(
@@ -209,19 +216,21 @@ class CSVConverter(ImageConverter):
         image_path = image_path.strip()
         if is_team_file:
             if not api.file.exists(team_id, image_path):
-                logger.warn(f"File {image_path} not found in Team Files. Skipping...")
+                logger.warning(f"File {image_path} not found in Team Files. Skipping...")
                 return None
             team_file_image_info = api.file.list(team_id, image_path)
             image_path = team_file_image_info[0]["fullStorageUrl"]
             if not image_path:
-                logger.warn(f"Failed to get full storage URL for file '{image_path}'. Skipping...")
+                logger.warning(
+                    f"Failed to get full storage URL for file '{image_path}'. Skipping..."
+                )
                 return None
 
         extension = os.path.splitext(image_path)[1]
         if not extension:
-            logger.warn(f"FYI: Image [{image_path}] doesn't have extension.")
+            logger.warning(f"FYI: Image [{image_path}] doesn't have extension.")
         elif extension.lower() not in SUPPORTED_IMG_EXTS:
-            logger.warn(
+            logger.warning(
                 f"Image [{image_path}] has unsupported extension [{extension}]. Skipping..."
             )
             return None
@@ -234,7 +243,7 @@ class CSVConverter(ImageConverter):
                 force_metadata_for_links=force_metadata,
             )
         except Exception:
-            logger.warn(f"Failed to upload image {image_name}. Skipping...")
+            logger.warning(f"Failed to link image {image_name}. Skipping...")
             return None
         if progress_cb is not None:
             progress_cb(1)
@@ -312,7 +321,7 @@ class CSVConverter(ImageConverter):
                 success = False
                 continue
             if item.name not in info.name:
-                logger.warn(
+                logger.warning(
                     f"Batched image with name '{item.name}' doesn't match uploaded image name '{info.name}'"
                 )
                 success = False
@@ -339,4 +348,4 @@ class CSVConverter(ImageConverter):
         if success:
             logger.info(f"Dataset ID:'{dataset_id}' has been successfully uploaded.")
         else:
-            logger.warn(f"Dataset ID:'{dataset_id}' has been uploaded.")
+            logger.warning(f"Dataset ID:'{dataset_id}' has been uploaded.")
supervisely/convert/video/video_converter.py CHANGED
@@ -266,8 +266,8 @@ class VideoConverter(BaseConverter):
             if codec_type not in ["video", "audio"]:
                 continue
             codec_name = stream["codecName"]
-            if codec_type == "video" and codec_name != "h264":
-                logger.info(f"Video codec is not h264, transcoding is required: {codec_name}")
+            if codec_type == "video" and codec_name not in ["h264", "h265", "hevc", "av1"]:
+                logger.info(f"Video codec is not h264/h265/hevc/av1, transcoding is required: {codec_name}")
                 need_video_transc = True
             elif codec_type == "audio" and codec_name != "aac":
                 logger.info(f"Audio codec is not aac, transcoding is required: {codec_name}")
supervisely/geometry/polyline_3d.py ADDED
@@ -0,0 +1,110 @@
+from supervisely.geometry.geometry import Geometry
+from supervisely.geometry.constants import LABELER_LOGIN, UPDATED_AT, CREATED_AT, ID, CLASS_ID
+from supervisely.geometry.cuboid_3d import Vector3d
+from typing import List, Union
+
+
+class Polyline3D(Geometry):
+    """
+    Polyline3D geometry
+
+    :param points: List of 3D point coordinates which define the polyline in 3D space.
+    :type points: List[List[int, int, int]]
+    :param sly_id: Polyline ID in Supervisely server.
+    :type sly_id: int, optional
+    :param class_id: ID of :class:`ObjClass<supervisely.annotation.obj_class.ObjClass>` to which Polyline belongs.
+    :type class_id: int, optional
+    :param labeler_login: Login of the user who created Polyline.
+    :type labeler_login: str, optional
+    :param updated_at: Date and Time when Polyline was modified last. Date Format: Year:Month:Day:Hour:Minute:Seconds. Example: '2021-01-22T19:37:50.158Z'.
+    :type updated_at: str, optional
+    :param created_at: Date and Time when Polyline was created. Date Format is the same as in "updated_at" parameter.
+    :type created_at: str, optional
+
+    :Usage example:
+
+     .. code-block:: python
+
+        import supervisely as sly
+
+        points = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+        figure = sly.Polyline3D(points)
+    """
+
+    @staticmethod
+    def geometry_name():
+        return "polyline_3d"
+
+    def __init__(
+        self,
+        points: Union[List[float], List[Vector3d]],
+        sly_id=None,
+        class_id=None,
+        labeler_login=None,
+        updated_at=None,
+        created_at=None,
+    ):
+        if not isinstance(points[0], Vector3d):
+            points = [Vector3d(point[0], point[1], point[2]) for point in points]
+        super().__init__(
+            sly_id=sly_id,
+            class_id=class_id,
+            labeler_login=labeler_login,
+            updated_at=updated_at,
+            created_at=created_at,
+        )
+
+        self._points = points
+
+    @property
+    def points(self):
+        return self._points
+
+    def to_json(self):
+        points = [[point.x, point.y, point.z] for point in self._points]
+        res = {"points": points}
+        self._add_creation_info(res)
+        return res
+
+    @classmethod
+    def from_json(cls, data):
+        """
+        Convert a json dict to Polyline3D.
+
+        :param data: Polyline3D in json format as a dict.
+        :type data: dict
+        :return: Polyline3D object
+        :rtype: :class:`Polyline3D<Polyline3D>`
+        :Usage example:
+
+         .. code-block:: python
+
+            import supervisely as sly
+
+            figure_json = {
+                "points": [
+                    [1, 2, 3],
+                    [4, 5, 6],
+                    [7, 8, 9]
+                ]
+            }
+            figure = sly.Polyline3D.from_json(figure_json)
+        """
+        if not data.get("points"):
+            raise ValueError("Data dict must contain 'points' field!")
+        points = data["points"]
+        labeler_login = data.get(LABELER_LOGIN, None)
+        updated_at = data.get(UPDATED_AT, None)
+        created_at = data.get(CREATED_AT, None)
+        sly_id = data.get(ID, None)
+        class_id = data.get(CLASS_ID, None)
+        return cls(
+            points,
+            sly_id=sly_id,
+            class_id=class_id,
+            labeler_login=labeler_login,
+            updated_at=updated_at,
+            created_at=created_at,
+        )
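Note: a round-trip usage sketch for the new geometry (point values are arbitrary):

    from supervisely.geometry.polyline_3d import Polyline3D

    line = Polyline3D([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    data = line.to_json()  # {"points": [[1, 2, 3], [4, 5, 6], [7, 8, 9]]}

    restored = Polyline3D.from_json(data)
    assert [[p.x, p.y, p.z] for p in restored.points] == data["points"]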
supervisely/io/env.py CHANGED
@@ -1,10 +1,14 @@
 # coding: utf-8
 import json
 import os
+from contextvars import ContextVar, Token
 from typing import Callable, List, Literal, Optional, Union
 
 RAISE_IF_NOT_FOUND = True
-
+_MULTIUSER_USER_CTX: ContextVar[Optional[Union[int, str]]] = ContextVar(
+    "supervisely_multiuser_app_user_id",
+    default=None,
+)
 
 def flag_from_env(s: str) -> bool:
     """Returns True if passed string is a flag, False otherwise.
@@ -771,3 +775,74 @@ def add_uploaded_ids_to_env(dataset_id: int, ids: List[int]) -> None:
     else:
         uploaded[str(dataset_id)].extend(ids)
     os.environ["UPLOADED_IDS"] = json.dumps(uploaded)
+
+
+def is_multiuser_mode_enabled() -> bool:
+    """Returns multiuser app mode flag from environment variable using following keys:
+    - SUPERVISELY_MULTIUSER_APP_MODE
+    :return: multiuser app mode flag
+    :rtype: bool
+    """
+    return _parse_from_env(
+        name="is_multiuser_mode_enabled",
+        keys=["SUPERVISELY_MULTIUSER_APP_MODE"],
+        default=False,
+        raise_not_found=False,
+        postprocess_fn=flag_from_env,
+    )
+
+
+def enable_multiuser_app_mode() -> None:
+    """
+    Enables multiuser app mode by setting the environment variable.
+    This function can be used to activate multiuser mode in the application, allowing
+    separation of user DataJson/StateJson.
+    """
+    os.environ["SUPERVISELY_MULTIUSER_APP_MODE"] = "true"
+
+
+def disable_multiuser_app_mode() -> None:
+    """Disables multiuser app mode by removing the environment variable."""
+    os.environ.pop("SUPERVISELY_MULTIUSER_APP_MODE", None)
+
+
+def set_user_for_multiuser_app(user_id: Optional[Union[int, str]]) -> Token:
+    """
+    Sets the user ID for multiuser app mode in a context variable.
+    This function should be used in multiuser mode to separate user DataJson/StateJson.
+
+    :param user_id: The user ID (or session key) to set for the current request.
+    :type user_id: int | str
+    :return: A context token that can be used to reset the user ID later.
+    :rtype: Token
+    :raises RuntimeError: If multiuser app mode is not enabled.
+    """
+    if not is_multiuser_mode_enabled():
+        raise RuntimeError("Multiuser app mode is not enabled. Cannot set user ID.")
+    return _MULTIUSER_USER_CTX.set(user_id)
+
+
+def reset_user_for_multiuser_app(token: Token) -> None:
+    """
+    Resets the user ID for multiuser app mode using the provided context token.
+
+    :param token: Context token obtained from `set_user_for_multiuser_app`.
+    :type token: Token
+    """
+    if not is_multiuser_mode_enabled():
+        return
+    _MULTIUSER_USER_CTX.reset(token)
+
+
+def user_from_multiuser_app() -> Optional[Union[int, str]]:
+    """
+    Retrieves the user ID for multiuser app mode from the context variable.
+
+    :return: The user ID if set, otherwise None.
+    :rtype: Optional[Union[int, str]]
+    """
+    if not is_multiuser_mode_enabled():
+        return None
+    user_id = _MULTIUSER_USER_CTX.get(None)
+    if user_id is not None:
+        return user_id
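Note: these helpers are built on ContextVar, so each request or task sees its own user ID even under async concurrency. A hedged sketch of how they could be wired into a FastAPI middleware (the middleware and header name are illustrative; the SDK's own wiring lives in supervisely/app/fastapi/multi_user.py):

    import supervisely.io.env as sly_env
    from fastapi import FastAPI, Request

    app = FastAPI()
    sly_env.enable_multiuser_app_mode()

    @app.middleware("http")
    async def per_user_context(request: Request, call_next):
        # Bind the caller's ID for the duration of this request only.
        token = sly_env.set_user_for_multiuser_app(request.headers.get("x-user-id"))
        try:
            return await call_next(request)
        finally:
            # Restore the previous value so contexts never leak across requests.
            sly_env.reset_user_for_multiuser_app(token)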
supervisely/nn/inference/cache.py CHANGED
@@ -771,7 +771,7 @@ class InferenceImageCache:
     def _download_many(
         self,
         indexes: List[Union[int, str]],
-        name_cunstructor: Callable[[int], str],
+        name_constructor: Callable[[int], str],
         load_generator: Callable[
             [List[int]],
             Generator[Tuple[Union[int, str], np.ndarray], None, None],
@@ -785,24 +785,42 @@ class InferenceImageCache:
         all_frames = [None for _ in range(len(indexes))]
 
         def get_one_image(item):
-            pos, index = item
+            pos, hash_or_id = item
             if video_id in self._cache:
-                return pos, self.get_frame_from_cache(video_id, index)
-            return pos, self._cache.get_image(name_cunstructor(index))
+                try:
+                    frame = self.get_frame_from_cache(video_id, hash_or_id)
+                except Exception as e:
+                    logger.error(
+                        f"Error retrieving frame from cache: {repr(e)}. Frame will be re-downloaded",
+                        exc_info=True,
+                    )
+                    ids_to_load.append(hash_or_id)
+                    return pos, None
+                return pos, frame
+            try:
+                image = self._cache.get_image(name_constructor(hash_or_id))
+            except Exception as e:
+                logger.error(
+                    f"Error retrieving image from cache: {repr(e)}. Image will be re-downloaded",
+                    exc_info=True,
+                )
+                ids_to_load.append(hash_or_id)
+                return pos, None
+            return pos, image
 
         position = 0
         batch_size = 4
         for batch in batched(indexes, batch_size):
-            indexes_to_load = []
+            ids_to_load = []
             items = []
             for hash_or_id in batch:
-                name = name_cunstructor(hash_or_id)
+                name = name_constructor(hash_or_id)
                 self._wait_if_in_queue(name, logger)
-
+                pos_by_name[name] = position
                 if name not in self._cache and video_id not in self._cache:
                     self._load_queue.set(name, hash_or_id)
-                    indexes_to_load.append(hash_or_id)
-                    pos_by_name[name] = position
+                    ids_to_load.append(hash_or_id)
+
                 elif return_images is True:
                     items.append((position, hash_or_id))
                 position += 1
@@ -810,14 +828,16 @@ class InferenceImageCache:
             if len(items) > 0:
                 with ThreadPoolExecutor(min(64, len(items))) as executor:
                     for pos, image in executor.map(get_one_image, items):
+                        if image is None:
+                            continue
                         all_frames[pos] = image
                         if progress_cb is not None:
                             progress_cb()
 
             download_time = time.monotonic()
-            if len(indexes_to_load) > 0:
-                for id_or_hash, image in load_generator(indexes_to_load):
-                    name = name_cunstructor(id_or_hash)
+            if len(ids_to_load) > 0:
+                for id_or_hash, image in load_generator(ids_to_load):
+                    name = name_constructor(id_or_hash)
                     self._add_to_cache(name, image)
 
             if return_images:
@@ -828,13 +848,13 @@ class InferenceImageCache:
             download_time = time.monotonic() - download_time
 
             # logger.debug(f"All stored files: {sorted(os.listdir(self.tmp_path))}")
-            if indexes_to_load:
-                indexes_to_load = list(indexes_to_load)
+            if ids_to_load:
+                ids_to_load = list(ids_to_load)
                 logger.debug(
-                    f"Images/Frames added to cache: {indexes_to_load} in {download_time:.2f} sec",
-                    extra={"indexes": indexes_to_load, "download_time": download_time},
+                    f"Images/Frames added to cache: {ids_to_load} in {download_time:.2f} sec",
+                    extra={"indexes": ids_to_load, "download_time": download_time},
                 )
-            found = set(batch).difference(indexes_to_load)
+            found = set(batch).difference(ids_to_load)
             if found:
                 logger.debug(f"Images/Frames found in cache: {list(found)}")
 
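Note: the net effect of these cache changes is a read-through pattern that tolerates corrupted entries: a failed cache read demotes the item to the re-download list instead of raising. As a standalone sketch (names are ours, not the SDK's):

    def read_through(keys, cache_get, download_many):
        results, to_download = {}, []
        for key in keys:
            try:
                results[key] = cache_get(key)  # may raise on a corrupted entry
            except Exception:
                to_download.append(key)        # fall back to re-downloading
        for key, value in download_many(to_download):
            results[key] = value
        return results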