supervisely 6.73.261__py3-none-any.whl → 6.73.262__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -1,11 +1,11 @@
  import json
  import shutil
+ import time
  from concurrent.futures import ThreadPoolExecutor, as_completed
  from enum import Enum
  from logging import Logger
  from pathlib import Path
  from threading import Lock, Thread
- from time import sleep
  from typing import Any, Callable, Generator, List, Optional, Tuple, Union

  import cv2
@@ -140,7 +140,7 @@ class PersistentImageTTLCache(TTLCache):
  self[video_id] = video_path
  if src_video_path != str(video_path):
  shutil.move(src_video_path, str(video_path))
- sly.logger.debug(f"Saved video to {video_path}")
+ sly.logger.debug(f"Video #{video_id} saved to {video_path}", extra={"video_id": video_id})

  def get_video_path(self, video_id: int) -> Path:
  return self[video_id]
@@ -197,12 +197,14 @@ class InferenceImageCache:
  ttl: int,
  is_persistent: bool = True,
  base_folder: str = sly.env.smart_cache_container_dir(),
+ log_progress: bool = False,
  ) -> None:
  self.is_persistent = is_persistent
  self._maxsize = maxsize
  self._ttl = ttl
  self._lock = Lock()
- self._load_queue = CacheOut(10 * 60)
+ self._load_queue = CacheOut(ttl=10 * 60)
+ self.log_progress = log_progress

  if is_persistent:
  self._data_dir = Path(base_folder)
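For orientation, log_progress is a new opt-in flag on InferenceImageCache (default False); the SDK switches it on itself in the Inference hunk further down. A minimal usage sketch, assuming the class is importable from supervisely.nn.inference.cache as the RECORD entries below suggest; the maxsize value is illustrative:

    import supervisely as sly
    from supervisely.nn.inference.cache import InferenceImageCache

    cache = InferenceImageCache(
        maxsize=256,  # illustrative value, not taken from this diff
        ttl=sly.env.smart_cache_ttl(),
        is_persistent=True,
        base_folder=sly.env.smart_cache_container_dir(),
        log_progress=True,  # new in 6.73.262: periodic download-progress debug logs
    )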
@@ -342,11 +344,13 @@ class InferenceImageCache:
  Thread(
  target=self.download_video,
  args=(api, video_id),
- kwargs={"return_images": False},
+ kwargs={**kwargs, "return_images": False},
  ).start()
  elif redownload_video:
  Thread(
- target=self.download_video, args=(api, video_id), kwargs={"return_images": False}
+ target=self.download_video,
+ args=(api, video_id),
+ kwargs={**kwargs, "return_images": False},
  ).start()

  def name_constuctor(frame_index: int):
@@ -371,6 +375,7 @@ class InferenceImageCache:
  with self._lock:
  self._cache.save_video(video_id, str(video_path))
  self._load_queue.delete(video_id)
+ sly.logger.debug(f"Video #{video_id} added to cache", extra={"video_id": video_id})
  else:
  cap = cv2.VideoCapture(str(video_path))
  frame_index = 0
@@ -396,17 +401,57 @@ class InferenceImageCache:
  """
  return_images = kwargs.get("return_images", True)
  progress_cb = kwargs.get("progress_cb", None)
+ video_info = kwargs.get("video_info", api.video.get_info_by_id(video_id))

- video_info = api.video.get_info_by_id(video_id)
  self._wait_if_in_queue(video_id, api.logger)
  if not video_id in self._cache:
+ download_time = time.monotonic()
  self._load_queue.set(video_id, video_id)
- sly.logger.debug("Downloading video #%s", video_id)
- temp_video_path = Path("/tmp/smart_cache").joinpath(
- f"_{sly.rand_str(6)}_" + video_info.name
- )
- api.video.download_path(video_id, temp_video_path, progress_cb=progress_cb)
- self.add_video_to_cache(video_id, temp_video_path)
+ try:
+ sly.logger.debug("Downloading video #%s", video_id)
+ if progress_cb is None and self.log_progress:
+ size = video_info.file_meta.get("size", None)
+ if size is None:
+ size = "unknown"
+ else:
+ size = int(size)
+
+ prog_n = 0
+ prog_t = time.monotonic()
+
+ def _progress_cb(n):
+ nonlocal prog_n
+ nonlocal prog_t
+ prog_n += n
+ cur_t = time.monotonic()
+ if cur_t - prog_t > 3 or (isinstance(size, int) and prog_n >= size):
+ prog_t = cur_t
+ percent_str = ""
+ if isinstance(size, int):
+ percent_str = f" ({(prog_n*100) // size}%)"
+ prog_str = (
+ f"{(prog_n / 1000000):.2f}/{(size / 1000000):.2f} MB{percent_str}"
+ )
+ sly.logger.debug(
+ "Downloading video #%s: %s",
+ video_id,
+ prog_str,
+ )
+
+ progress_cb = _progress_cb
+ temp_video_path = Path("/tmp/smart_cache").joinpath(
+ f"_{sly.rand_str(6)}_" + video_info.name
+ )
+ api.video.download_path(video_id, temp_video_path, progress_cb=progress_cb)
+ self.add_video_to_cache(video_id, temp_video_path)
+ download_time = time.monotonic() - download_time
+ api.logger.debug(
+ f"Video #{video_id} downloaded to cache in {download_time:.2f} sec",
+ extra={"video_id": video_id, "download_time": download_time},
+ )
+ except Exception as e:
+ self._load_queue.delete(video_id)
+ raise e
  if return_images:
  return self.get_frames_from_cache(video_id, list(range(video_info.frames_count)))

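The block above wires a throttled progress callback into download_video when the caller did not supply one: it accumulates downloaded bytes and logs at most every 3 seconds, plus a final message once the known file size is reached. The same idea as a self-contained helper, purely for illustration (the helper name and defaults are hypothetical, not SDK API):

    import time

    def make_throttled_progress_cb(total_size=None, interval=3.0, log=print):
        # Accumulate transferred bytes and report at most once per `interval` seconds,
        # with one extra report when `total_size` bytes have been seen (if known).
        downloaded = 0
        last_log = time.monotonic()

        def progress_cb(n):
            nonlocal downloaded, last_log
            downloaded += n
            now = time.monotonic()
            finished = total_size is not None and downloaded >= total_size
            if now - last_log > interval or finished:
                last_log = now
                if total_size:
                    log(f"{downloaded / 1e6:.2f}/{total_size / 1e6:.2f} MB ({downloaded * 100 // total_size}%)")
                else:
                    log(f"{downloaded / 1e6:.2f} MB")

        return progress_cb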
@@ -664,6 +709,7 @@ class InferenceImageCache:
  for pos, image in executor.map(get_one_image, items):
  all_frames[pos] = image

+ download_time = time.monotonic()
  if len(indexes_to_load) > 0:
  for id_or_hash, image in load_generator(indexes_to_load):
  name = name_cunstructor(id_or_hash)
@@ -672,9 +718,13 @@ class InferenceImageCache:
  if return_images:
  pos = pos_by_name[name]
  all_frames[pos] = image
+ download_time = time.monotonic() - download_time

  # logger.debug(f"All stored files: {sorted(os.listdir(self.tmp_path))}")
- logger.debug(f"Images/Frames added to cache: {indexes_to_load}")
+ logger.debug(
+ f"Images/Frames added to cache: {indexes_to_load} in {download_time:.2f} sec",
+ extra={"indexes": indexes_to_load, "download_time": download_time},
+ )
  logger.debug(f"Images/Frames found in cache: {set(indexes).difference(indexes_to_load)}")

  if return_images:
@@ -686,8 +736,8 @@ class InferenceImageCache:
  logger.debug(f"Waiting for other task to load {name}")

  while name in self._load_queue:
- # TODO: sleep if slowdown
- sleep(0.1)
+ # TODO: time.sleep if slowdown
+ time.sleep(0.1)
  continue

  def download_frames_to_paths(self, api, video_id, frame_indexes, paths, progress_cb=None):
@@ -228,6 +228,7 @@ class Inference:
  ttl=env.smart_cache_ttl(),
  is_persistent=True,
  base_folder=env.smart_cache_container_dir(),
+ log_progress=True,
  )

  def get_batch_size(self):
@@ -1,6 +1,7 @@
  import functools
  import json
  import time
+ import uuid
  from pathlib import Path
  from queue import Queue
  from threading import Event, Thread
@@ -154,7 +155,8 @@ class BBoxTracking(Inference):
  raise
  stop_upload_event.set()

- def _track_api(self, api: sly.Api, context: dict):
+ def _track_api(self, api: sly.Api, context: dict, request_uuid: str = None):
+ track_t = time.monotonic()
  # unused fields:
  context["trackId"] = "auto"
  context["objectIds"] = []
@@ -193,15 +195,27 @@ class BBoxTracking(Inference):
  video_id=video_interface.video_id,
  )

- api.logger.info("Start tracking.")
-
  predictions = []
- for input_geom in input_bboxes:
+ frames_n = video_interface.frames_count
+ box_n = len(input_bboxes)
+ geom_t = time.monotonic()
+ api.logger.info(
+ "Start tracking.",
+ extra={
+ "video_id": video_interface.video_id,
+ "frame_range": range_of_frames,
+ "geometries_count": box_n,
+ "frames_count": frames_n,
+ "request_uuid": request_uuid,
+ },
+ )
+ for box_i, input_geom in enumerate(input_bboxes, 1):
  input_bbox = input_geom["data"]
  bbox = sly.Rectangle.from_json(input_bbox)
  predictions_for_object = []
  init = False
- for _ in video_interface.frames_loader_generator():
+ frame_t = time.monotonic()
+ for frame_i, _ in enumerate(video_interface.frames_loader_generator(), 1):
  imgs = video_interface.frames
  target = PredictionBBox(
  "", # TODO: can this be useful?
@@ -224,10 +238,40 @@ class BBoxTracking(Inference):
  predictions_for_object.append(
  {"type": sly_geometry.geometry_name(), "data": sly_geometry.to_json()}
  )
+ api.logger.debug(
+ "Frame processed. Geometry: [%d / %d]. Frame: [%d / %d]",
+ box_i,
+ box_n,
+ frame_i,
+ frames_n,
+ extra={
+ "geometry_index": box_i,
+ "frame_index": frame_i,
+ "processing_time": time.monotonic() - frame_t,
+ "request_uuid": request_uuid,
+ },
+ )
+ frame_t = time.monotonic()
+
  predictions.append(predictions_for_object)
+ api.logger.info(
+ "Geometry processed. Progress: [%d / %d]",
+ box_i,
+ box_n,
+ extra={
+ "geometry_index": box_i,
+ "processing_time": time.monotonic() - geom_t,
+ "request_uuid": request_uuid,
+ },
+ )
+ geom_t = time.monotonic()

  # predictions must be NxK bboxes: N=number of frames, K=number of objects
  predictions = list(map(list, zip(*predictions)))
+ api.logger.info(
+ "Tracking finished.",
+ extra={"tracking_time": time.monotonic() - track_t, "request_uuid": request_uuid},
+ )
  return predictions

  def _inference(self, frames: List[np.ndarray], geometries: List[Geometry], settings: dict):
@@ -322,8 +366,19 @@ class BBoxTracking(Inference):

  @server.post("/track-api")
  def track_api(request: Request):
- sly.logger.info("Start tracking.")
- return self._track_api(request.state.api, request.state.context)
+ inference_request_uuid = uuid.uuid5(
+ namespace=uuid.NAMESPACE_URL, name=f"{time.time()}"
+ ).hex
+ sly.logger.info(
+ "Received track-api request.", extra={"request_uuid": inference_request_uuid}
+ )
+ result = self._track_api(
+ request.state.api, request.state.context, request_uuid=inference_request_uuid
+ )
+ sly.logger.info(
+ "Track-api request processed.", extra={"request_uuid": inference_request_uuid}
+ )
+ return result

  @server.post("/track-api-files")
  def track_api_files(
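The /track-api handler above now mints a per-request id (uuid5 of the current timestamp string) and passes it into _track_api, so every log record of one request carries the same request_uuid in its extra fields. A standalone sketch of that correlation pattern using only the standard library (the logger name is illustrative):

    import logging
    import time
    import uuid

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("track_api_demo")

    # Same derivation as in the handler above: uuid5 of the current timestamp string.
    request_uuid = uuid.uuid5(uuid.NAMESPACE_URL, f"{time.time()}").hex

    # `extra` attaches request_uuid to each LogRecord; a structured (e.g. JSON)
    # handler can emit it, so all records of one request can be grouped later.
    logger.info("Received track-api request.", extra={"request_uuid": request_uuid})
    logger.info("Track-api request processed.", extra={"request_uuid": request_uuid})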
@@ -73,7 +73,7 @@ class TrackerInterface:

  self.geometries[start_fig] = geometries[-1]

- def frames_loader_generator(self, batch_size=16) -> Generator[None, None, None]:
+ def frames_loader_generator(self, batch_size=4) -> Generator[None, None, None]:
  if self.load_all_frames:
  self._cur_frames_indexes = self.frames_indexes
  yield
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: supervisely
- Version: 6.73.261
+ Version: 6.73.262
  Summary: Supervisely Python SDK.
  Home-page: https://github.com/supervisely/supervisely
  Author: Supervisely
@@ -858,8 +858,8 @@ supervisely/nn/benchmark/visualization/widgets/sidebar/sidebar.py,sha256=tKPURRS
  supervisely/nn/benchmark/visualization/widgets/table/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  supervisely/nn/benchmark/visualization/widgets/table/table.py,sha256=atmDnF1Af6qLQBUjLhK18RMDKAYlxnsuVHMSEa5a-e8,4319
  supervisely/nn/inference/__init__.py,sha256=mtEci4Puu-fRXDnGn8RP47o97rv3VTE0hjbYO34Zwqg,1622
- supervisely/nn/inference/cache.py,sha256=KvzCgMbEBLdiJAxJDLicIPKAlYb52P9_kpNPWfiVY8Y,28194
- supervisely/nn/inference/inference.py,sha256=acNJH-1UthvJgyhI-9KJv8hzo8LjOVVdXPkoEuwfRLg,128864
+ supervisely/nn/inference/cache.py,sha256=_pPSpkl8Wkqkiidn0vu6kWE19cngd80av--jncHxMEQ,30510
+ supervisely/nn/inference/inference.py,sha256=8MrOen2oyYIKiVqy0WbBTwABJZss9MLQ70EwX0e_-es,128895
  supervisely/nn/inference/session.py,sha256=jmkkxbe2kH-lEgUU6Afh62jP68dxfhF5v6OGDfLU62E,35757
  supervisely/nn/inference/video_inference.py,sha256=8Bshjr6rDyLay5Za8IB8Dr6FURMO2R_v7aELasO8pR4,5746
  supervisely/nn/inference/gui/__init__.py,sha256=wCxd-lF5Zhcwsis-wScDA8n1Gk_1O00PKgDviUZ3F1U,221
@@ -892,13 +892,13 @@ supervisely/nn/inference/salient_object_segmentation/salient_object_segmentation
  supervisely/nn/inference/semantic_segmentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  supervisely/nn/inference/semantic_segmentation/semantic_segmentation.py,sha256=xpmViSYm1v_ZxlYyqiD_DiB7_LEynv9ZoU0t2QHEx8A,3370
  supervisely/nn/inference/tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- supervisely/nn/inference/tracking/bbox_tracking.py,sha256=URWGoYrCRN3yC4MyLx2_eZziZYwX6mPA8OLJqUOsAgA,15307
+ supervisely/nn/inference/tracking/bbox_tracking.py,sha256=EBEwLczXugCSYaIUFxB33h7SOIkPDEBWLhr_TbASNOA,17441
  supervisely/nn/inference/tracking/functional.py,sha256=LpVu2gvOOpr9D_uvwTPZey1wUCAhV-E20RPKmCSIrK4,1774
  supervisely/nn/inference/tracking/mask_tracking.py,sha256=qL9eUSqhzJwJMYaAzXX31oOu9EgdnGbsNwK9pOlV148,19610
  supervisely/nn/inference/tracking/object_tracking_3d.py,sha256=Kqvx1qe1G8F1VtdBiy2HJ251rJU6s3LWhj0ZedhrmUw,4327
  supervisely/nn/inference/tracking/point_tracking.py,sha256=Dweiq3dJUuwlFYnJbyx28L3IisNeg-1KQf2mBHrr7yI,22050
  supervisely/nn/inference/tracking/tracker3d_interface.py,sha256=7yIkNO9rgkzQuyXUUccLwqlv5k7RPbxTqz9uI4FylLE,2781
- supervisely/nn/inference/tracking/tracker_interface.py,sha256=MJFzSpQvhC0F-HoryRuoDinO_5xS5oFvT5zDE94ZyKY,10767
+ supervisely/nn/inference/tracking/tracker_interface.py,sha256=FXI9f0I5Tb5HN7l8fvxJ5wJ-QYuKyxfXiDpfXRLsSq4,10766
  supervisely/nn/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  supervisely/nn/legacy/config.py,sha256=TKdyGtURJKzKoyydCZAfujoUnbC0SO8GeVLTSnoyS_w,2994
  supervisely/nn/legacy/dataset.py,sha256=-56EI6OYbkTWx4y8hOgD76y47zUoJNjGFyZ6JaP8iqg,6055
@@ -1057,9 +1057,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
  supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
  supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
  supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
- supervisely-6.73.261.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- supervisely-6.73.261.dist-info/METADATA,sha256=-ebDT49QDbB4Ly0fCqehbeu-_SN7bLV1lObohSxHS4M,33573
- supervisely-6.73.261.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
- supervisely-6.73.261.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
- supervisely-6.73.261.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
- supervisely-6.73.261.dist-info/RECORD,,
+ supervisely-6.73.262.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ supervisely-6.73.262.dist-info/METADATA,sha256=aMuL624leouIzMWfrL4bx-4A0hXHCT1hTTSHd9YYVbw,33573
+ supervisely-6.73.262.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+ supervisely-6.73.262.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+ supervisely-6.73.262.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+ supervisely-6.73.262.dist-info/RECORD,,