supervisely 6.73.260-py3-none-any.whl → 6.73.262-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of supervisely might be problematic.

@@ -19,6 +19,8 @@ from supervisely.app.widgets import (
 )
 from supervisely.io.fs import get_file_name_with_ext
 from supervisely.nn.experiments import ExperimentInfo
+from supervisely.nn.utils import ModelSource
+

 WEIGHTS_DIR = "weights"

@@ -473,6 +475,17 @@ class ExperimentSelector(Widget):
         full_model_files["checkpoint"] = self.get_selected_checkpoint_path()
         return full_model_files

+    def get_deploy_params(self) -> Dict[str, Any]:
+        """
+        Returns a dictionary with deploy parameters except runtime and device keys.
+        """
+        deploy_params = {
+            "model_source": ModelSource.CUSTOM,
+            "model_files": self.get_model_files(),
+            "model_info": self.get_selected_experiment_info(),
+        }
+        return deploy_params
+
     def set_active_row(self, row_index: int) -> None:
         if row_index < 0 or row_index > len(self._rows) - 1:
             raise ValueError(f'Row with index "{row_index}" does not exist')
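The new ExperimentSelector.get_deploy_params helper bundles the custom-model deploy arguments (model source, model files and experiment info) without device or runtime. A minimal usage sketch, assuming an existing experiment_selector widget; the "device"/"runtime" key names are taken from the docstring and deploy_model is a hypothetical app-side entry point:

    # get_deploy_params intentionally omits device/runtime, so add them before deploying
    deploy_params = experiment_selector.get_deploy_params()
    deploy_params["device"] = "cuda:0"     # assumed key name
    deploy_params["runtime"] = "PyTorch"   # assumed key name
    deploy_model(**deploy_params)          # hypothetical deploy entry point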
@@ -1,9 +1,10 @@
-from typing import Dict, List, Union
+from typing import Dict, List, Literal, Union

 from supervisely.api.api import Api
 from supervisely.app.content import DataJson, StateJson
 from supervisely.app.widgets import Widget
 from supervisely.io.fs import get_file_ext
+from supervisely.nn.utils import ModelSource


 class PretrainedModelsSelector(Widget):
@@ -134,39 +135,71 @@ class PretrainedModelsSelector(Widget):
         selected_row_index = int(widget_actual_state["selectedRow"])
         return models[selected_row_index]

-    def get_selected_model_params(self, model_name_column: str = "Model") -> Union[Dict, None]:
+    def get_selected_model_params(
+        self,
+        model_name_column: str = "Model",
+        train_version: Literal["v1", "v2"] = "v1",
+    ) -> Union[Dict, None]:
         selected_model = self.get_selected_row()
         if selected_model is None:
             return {}

-        model_name = selected_model.get(model_name_column)
-        if model_name is None:
-            raise ValueError(
-                "Could not find model name. Make sure you have column 'Model' in your models list."
-            )
-        checkpoint_url = selected_model.get("meta", {}).get("weights_url")
-        if checkpoint_url is None:
-            pass
-
-        checkpoint_ext = get_file_ext(checkpoint_url)
-        checkpoint_name = f"{model_name.lower()}{checkpoint_ext}"
-
-        task_type = self.get_selected_task_type()
-        model_params = {
-            "model_source": "Pretrained models",
-            "task_type": task_type,
-            "checkpoint_name": checkpoint_name,
-            "checkpoint_url": checkpoint_url,
-        }
-
-        if len(self._arch_types) > 1:
-            arch_type = self.get_selected_arch_type()
-            model_params["arch_type"] = arch_type
-
-        config_url = selected_model.get("meta", {}).get("config_url")
-        if config_url is not None:
-            model_params["config_url"] = config_url
+        if train_version == "v1":
+            model_name = selected_model.get(model_name_column)
+            if model_name is None:
+                raise ValueError(
+                    "Could not find model name. Make sure you have column 'Model' in your models list."
+                )
+
+            model_meta = selected_model.get("meta")
+            if model_meta is None:
+                raise ValueError(
+                    "Could not find model meta. Make sure you have key 'meta' in your models configuration list."
+                )
+            checkpoint_url = model_meta.get("weights_url")
+            if checkpoint_url is None:
+                model_files = model_meta.get("model_files")
+                if model_files is None:
+                    raise ValueError(
+                        "Could not find model files. Make sure you have key 'model_files' or 'weights_url' in 'meta' in your models configuration list."
+                    )
+                checkpoint_url = model_files.get("checkpoint")
+                if checkpoint_url is None:
+                    raise ValueError(
+                        "Could not find checkpoint url. Make sure you have key 'checkpoint' in 'model_files' in 'meta' in your models configuration list."
+                    )
+
+            checkpoint_ext = get_file_ext(checkpoint_url)
+            checkpoint_name = f"{model_name.lower()}{checkpoint_ext}"
+
+            task_type = self.get_selected_task_type()
+            model_params = {
+                "model_source": "Pretrained models",
+                "task_type": task_type,
+                "checkpoint_name": checkpoint_name,
+                "checkpoint_url": checkpoint_url,
+            }

+            if len(self._arch_types) > 1:
+                arch_type = self.get_selected_arch_type()
+                model_params["arch_type"] = arch_type
+
+            config_url = selected_model.get("meta", {}).get("config_url")
+            if config_url is not None:
+                model_params["config_url"] = config_url
+        elif train_version == "v2":
+            model_info = self.get_selected_row()
+            meta = model_info.get("meta")
+            if meta is None:
+                raise ValueError("key 'meta' not found in model configuration")
+            model_files = meta.get("model_files")
+            if model_files is None:
+                raise ValueError("key 'model_files' not found in key 'meta' in model configuration")
+            model_params = {
+                "model_source": ModelSource.PRETRAINED,
+                "model_info": model_info,
+                "model_files": model_files,
+            }
         return model_params

     def get_selected_row_index(self, state=StateJson()) -> Union[int, None]:
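get_selected_model_params now takes a train_version switch: "v1" keeps the legacy checkpoint_name/checkpoint_url payload, while "v2" returns the ModelSource-based dict with model_info and model_files. A short sketch, assuming model_selector is an existing PretrainedModelsSelector with a row selected:

    # Legacy (v1) behaviour: checkpoint_name / checkpoint_url keys.
    params_v1 = model_selector.get_selected_model_params()

    # New (v2) behaviour: model_source, model_info and model_files keys.
    params_v2 = model_selector.get_selected_model_params(train_version="v2")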
@@ -536,58 +536,62 @@ class BaseTrainArtifacts:
            api: Api, train_info: TrainInfo
        ) -> ExperimentInfo:

-            checkpoints = []
-            for chk in train_info.checkpoints:
-                if self.weights_folder:
-                    checkpoints.append(join(self.weights_folder, chk.name))
-                else:
-                    checkpoints.append(chk.name)
-
-            best_checkpoint = next(
-                (chk.name for chk in train_info.checkpoints if "best" in chk.name), None
-            )
-            if not best_checkpoint and checkpoints:
-                best_checkpoint = get_file_name_with_ext(checkpoints[-1])
-
-            task_info = api.task.get_info_by_id(train_info.task_id)
-            workspace_id = task_info["workspaceId"]
-
-            project = api.project.get_info_by_name(workspace_id, train_info.project_name)
-            project_id = project.id if project else None
-
-            model_files = {}
-            if train_info.config_path:
-                model_files["config"] = self.get_config_path(train_info.artifacts_folder).replace(
-                    train_info.artifacts_folder, ""
+            try:
+                checkpoints = []
+                for chk in train_info.checkpoints:
+                    if self.weights_folder:
+                        checkpoints.append(join(self.weights_folder, chk.name))
+                    else:
+                        checkpoints.append(chk.name)
+
+                best_checkpoint = next(
+                    (chk.name for chk in train_info.checkpoints if "best" in chk.name), None
                )
-
-            input_datetime = task_info["startedAt"]
-            parsed_datetime = datetime.strptime(input_datetime, "%Y-%m-%dT%H:%M:%S.%fZ")
-            date_time = parsed_datetime.strftime("%Y-%m-%d %H:%M:%S")
-
-            experiment_info_data = {
-                "experiment_name": f"Unknown {self.framework_name} experiment",
-                "framework_name": self.framework_name,
-                "model_name": f"Unknown {self.framework_name} model",
-                "task_type": train_info.task_type,
-                "project_id": project_id,
-                "task_id": train_info.task_id,
-                "model_files": model_files,
-                "checkpoints": checkpoints,
-                "best_checkpoint": best_checkpoint,
-                "artifacts_dir": train_info.artifacts_folder,
-                "datetime": date_time,
-            }
-
-            experiment_info_fields = {
-                field.name
-                for field in ExperimentInfo.__dataclass_fields__.values()  # pylint: disable=no-member
-            }
-            for field in experiment_info_fields:
-                if field not in experiment_info_data:
-                    experiment_info_data[field] = None
-
-            return ExperimentInfo(**experiment_info_data)
+                if not best_checkpoint and checkpoints:
+                    best_checkpoint = get_file_name_with_ext(checkpoints[-1])
+
+                task_info = api.task.get_info_by_id(train_info.task_id)
+                workspace_id = task_info["workspaceId"]
+
+                project = api.project.get_info_by_name(workspace_id, train_info.project_name)
+                project_id = project.id if project else None
+
+                model_files = {}
+                if train_info.config_path:
+                    model_files["config"] = self.get_config_path(
+                        train_info.artifacts_folder
+                    ).replace(train_info.artifacts_folder, "")
+
+                input_datetime = task_info["startedAt"]
+                parsed_datetime = datetime.strptime(input_datetime, "%Y-%m-%dT%H:%M:%S.%fZ")
+                date_time = parsed_datetime.strftime("%Y-%m-%d %H:%M:%S")
+
+                experiment_info_data = {
+                    "experiment_name": f"Unknown {self.framework_name} experiment",
+                    "framework_name": self.framework_name,
+                    "model_name": f"Unknown {self.framework_name} model",
+                    "task_type": train_info.task_type,
+                    "project_id": project_id,
+                    "task_id": train_info.task_id,
+                    "model_files": model_files,
+                    "checkpoints": checkpoints,
+                    "best_checkpoint": best_checkpoint,
+                    "artifacts_dir": train_info.artifacts_folder,
+                    "datetime": date_time,
+                }
+
+                experiment_info_fields = {
+                    field.name
+                    for field in ExperimentInfo.__dataclass_fields__.values()  # pylint: disable=no-member
+                }
+                for field in experiment_info_fields:
+                    if field not in experiment_info_data:
+                        experiment_info_data[field] = None
+
+                return ExperimentInfo(**experiment_info_data)
+            except Exception as e:
+                logger.warning(f"Failed to build experiment info: {e}")
+                return None

        train_infos = self.get_list(sort)

@@ -1,11 +1,11 @@
 import json
 import shutil
+import time
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from enum import Enum
 from logging import Logger
 from pathlib import Path
 from threading import Lock, Thread
-from time import sleep
 from typing import Any, Callable, Generator, List, Optional, Tuple, Union

 import cv2
@@ -140,7 +140,7 @@ class PersistentImageTTLCache(TTLCache):
        self[video_id] = video_path
        if src_video_path != str(video_path):
            shutil.move(src_video_path, str(video_path))
-        sly.logger.debug(f"Saved video to {video_path}")
+        sly.logger.debug(f"Video #{video_id} saved to {video_path}", extra={"video_id": video_id})

    def get_video_path(self, video_id: int) -> Path:
        return self[video_id]
@@ -197,12 +197,14 @@ class InferenceImageCache:
        ttl: int,
        is_persistent: bool = True,
        base_folder: str = sly.env.smart_cache_container_dir(),
+        log_progress: bool = False,
    ) -> None:
        self.is_persistent = is_persistent
        self._maxsize = maxsize
        self._ttl = ttl
        self._lock = Lock()
-        self._load_queue = CacheOut(10 * 60)
+        self._load_queue = CacheOut(ttl=10 * 60)
+        self.log_progress = log_progress

        if is_persistent:
            self._data_dir = Path(base_folder)
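InferenceImageCache now accepts a log_progress flag that enables periodic download-progress logging (the Inference constructor further down passes log_progress=True). A construction sketch; maxsize and ttl values are assumed literals for illustration, while base_folder's default comes from the diff:

    import supervisely as sly
    from supervisely.nn.inference.cache import InferenceImageCache

    cache = InferenceImageCache(
        maxsize=256,        # assumed value for illustration
        ttl=60 * 60,        # assumed value for illustration
        is_persistent=True,
        base_folder=sly.env.smart_cache_container_dir(),
        log_progress=True,  # new in this release; defaults to False
    )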
@@ -342,11 +344,13 @@ class InferenceImageCache:
            Thread(
                target=self.download_video,
                args=(api, video_id),
-                kwargs={"return_images": False},
+                kwargs={**kwargs, "return_images": False},
            ).start()
        elif redownload_video:
            Thread(
-                target=self.download_video, args=(api, video_id), kwargs={"return_images": False}
+                target=self.download_video,
+                args=(api, video_id),
+                kwargs={**kwargs, "return_images": False},
            ).start()

        def name_constuctor(frame_index: int):
@@ -371,6 +375,7 @@ class InferenceImageCache:
            with self._lock:
                self._cache.save_video(video_id, str(video_path))
                self._load_queue.delete(video_id)
+                sly.logger.debug(f"Video #{video_id} added to cache", extra={"video_id": video_id})
        else:
            cap = cv2.VideoCapture(str(video_path))
            frame_index = 0
@@ -396,17 +401,57 @@ class InferenceImageCache:
        """
        return_images = kwargs.get("return_images", True)
        progress_cb = kwargs.get("progress_cb", None)
+        video_info = kwargs.get("video_info", api.video.get_info_by_id(video_id))

-        video_info = api.video.get_info_by_id(video_id)
        self._wait_if_in_queue(video_id, api.logger)
        if not video_id in self._cache:
+            download_time = time.monotonic()
            self._load_queue.set(video_id, video_id)
-            sly.logger.debug("Downloading video #%s", video_id)
-            temp_video_path = Path("/tmp/smart_cache").joinpath(
-                f"_{sly.rand_str(6)}_" + video_info.name
-            )
-            api.video.download_path(video_id, temp_video_path, progress_cb=progress_cb)
-            self.add_video_to_cache(video_id, temp_video_path)
+            try:
+                sly.logger.debug("Downloading video #%s", video_id)
+                if progress_cb is None and self.log_progress:
+                    size = video_info.file_meta.get("size", None)
+                    if size is None:
+                        size = "unknown"
+                    else:
+                        size = int(size)
+
+                    prog_n = 0
+                    prog_t = time.monotonic()
+
+                    def _progress_cb(n):
+                        nonlocal prog_n
+                        nonlocal prog_t
+                        prog_n += n
+                        cur_t = time.monotonic()
+                        if cur_t - prog_t > 3 or (isinstance(size, int) and prog_n >= size):
+                            prog_t = cur_t
+                            percent_str = ""
+                            if isinstance(size, int):
+                                percent_str = f" ({(prog_n*100) // size}%)"
+                            prog_str = (
+                                f"{(prog_n / 1000000):.2f}/{(size / 1000000):.2f} MB{percent_str}"
+                            )
+                            sly.logger.debug(
+                                "Downloading video #%s: %s",
+                                video_id,
+                                prog_str,
+                            )
+
+                    progress_cb = _progress_cb
+                temp_video_path = Path("/tmp/smart_cache").joinpath(
+                    f"_{sly.rand_str(6)}_" + video_info.name
+                )
+                api.video.download_path(video_id, temp_video_path, progress_cb=progress_cb)
+                self.add_video_to_cache(video_id, temp_video_path)
+                download_time = time.monotonic() - download_time
+                api.logger.debug(
+                    f"Video #{video_id} downloaded to cache in {download_time:.2f} sec",
+                    extra={"video_id": video_id, "download_time": download_time},
+                )
+            except Exception as e:
+                self._load_queue.delete(video_id)
+                raise e
        if return_images:
            return self.get_frames_from_cache(video_id, list(range(video_info.frames_count)))

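download_video now reads video_info and progress_cb from **kwargs, so callers can reuse an already-fetched VideoInfo (avoiding a second get_info_by_id call) and plug in their own progress callback. A hedged sketch; api, video_id and cache are assumed to exist, and the positional signature is inferred from the call sites above:

    video_info = api.video.get_info_by_id(video_id)

    def my_progress_cb(n):
        # n is the number of bytes reported since the previous callback
        print(f"downloaded {n} more bytes of video #{video_id}")

    cache.download_video(api, video_id, video_info=video_info, progress_cb=my_progress_cb)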
@@ -664,6 +709,7 @@ class InferenceImageCache:
            for pos, image in executor.map(get_one_image, items):
                all_frames[pos] = image

+        download_time = time.monotonic()
        if len(indexes_to_load) > 0:
            for id_or_hash, image in load_generator(indexes_to_load):
                name = name_cunstructor(id_or_hash)
@@ -672,9 +718,13 @@
                if return_images:
                    pos = pos_by_name[name]
                    all_frames[pos] = image
+        download_time = time.monotonic() - download_time

        # logger.debug(f"All stored files: {sorted(os.listdir(self.tmp_path))}")
-        logger.debug(f"Images/Frames added to cache: {indexes_to_load}")
+        logger.debug(
+            f"Images/Frames added to cache: {indexes_to_load} in {download_time:.2f} sec",
+            extra={"indexes": indexes_to_load, "download_time": download_time},
+        )
        logger.debug(f"Images/Frames found in cache: {set(indexes).difference(indexes_to_load)}")

        if return_images:
@@ -686,8 +736,8 @@ class InferenceImageCache:
        logger.debug(f"Waiting for other task to load {name}")

        while name in self._load_queue:
-            # TODO: sleep if slowdown
-            sleep(0.1)
+            # TODO: time.sleep if slowdown
+            time.sleep(0.1)
            continue

    def download_frames_to_paths(self, api, video_id, frame_indexes, paths, progress_cb=None):
@@ -228,6 +228,7 @@ class Inference:
            ttl=env.smart_cache_ttl(),
            is_persistent=True,
            base_folder=env.smart_cache_container_dir(),
+            log_progress=True,
        )

    def get_batch_size(self):
@@ -571,7 +572,9 @@
                ) as download_pbar:
                    self.gui.download_progress.show()
                    sly_fs.download(
-                        url=file_url, save_path=file_path, progress=download_pbar.update
+                        url=file_url,
+                        save_path=file_path,
+                        progress=download_pbar.update,
                    )
                    local_model_files[file] = file_path
            else:
@@ -590,6 +593,10 @@
            for file in model_files:
                file_url = model_files[file]
                file_info = self.api.file.get_info_by_path(team_id, file_url)
+                if file_info is None:
+                    raise FileNotFoundError(
+                        f"File '{file_url}' not found in Team Files. Make sure the file exists."
+                    )
                file_size = file_info.sizeb
                file_name = os.path.basename(file_url)
                file_path = os.path.join(self.model_dir, file_name)
@@ -1374,7 +1381,11 @@
        return result

    def _inference_images_ids(
-        self, api: Api, state: dict, images_ids: List[int], async_inference_request_uuid: str = None
+        self,
+        api: Api,
+        state: dict,
+        images_ids: List[int],
+        async_inference_request_uuid: str = None,
    ):
        """Inference images by ids.
        If "output_project_id" in state, upload images and annotations to the output project.
@@ -1799,7 +1810,8 @@
        dataset_id = _get_or_create_new_dataset(output_project_id, src_dataset_id)
        image_names = [result["image_name"] for result in results]
        image_infos = api.image.get_list(
-            dataset_id, filters=[{"field": "name", "operator": "in", "value": image_names}]
+            dataset_id,
+            filters=[{"field": "name", "operator": "in", "value": image_names}],
        )
        meta_changed = False
        anns = []
@@ -2104,7 +2116,10 @@
        # Read images
        if cache_project_on_model:
            images_nps = []
-            for dataset_id, images_infos in images_infos_batch_by_dataset.items():
+            for (
+                dataset_id,
+                images_infos,
+            ) in images_infos_batch_by_dataset.items():
                dataset_info = datasets_infos_dict[dataset_id]
                images_paths, _ = zip(
                    *read_from_cached_project(
@@ -2116,7 +2131,10 @@
                images_nps.extend([sly_image.read(path) for path in images_paths])
        else:
            images_nps = []
-            for dataset_id, images_infos in images_infos_batch_by_dataset.items():
+            for (
+                dataset_id,
+                images_infos,
+            ) in images_infos_batch_by_dataset.items():
                images_nps.extend(
                    self.cache.download_images(
                        api,
2845
2863
  """Update project meta and annotation to match each other
2846
2864
  If obj class or tag meta from annotation conflicts with project meta
2847
2865
  add suffix to obj class or tag meta.
2848
- Return tuple of updated project meta, annotation and boolean flag if meta was changed."""
2866
+ Return tuple of updated project meta, annotation and boolean flag if meta was changed.
2867
+ """
2849
2868
  obj_classes_suffixes = ["_nn"]
2850
2869
  tag_meta_suffixes = ["_nn"]
2851
2870
  ann_obj_classes = {}
@@ -1,6 +1,7 @@
 import functools
 import json
 import time
+import uuid
 from pathlib import Path
 from queue import Queue
 from threading import Event, Thread
@@ -154,7 +155,8 @@ class BBoxTracking(Inference):
            raise
        stop_upload_event.set()

-    def _track_api(self, api: sly.Api, context: dict):
+    def _track_api(self, api: sly.Api, context: dict, request_uuid: str = None):
+        track_t = time.monotonic()
        # unused fields:
        context["trackId"] = "auto"
        context["objectIds"] = []
@@ -193,15 +195,27 @@ class BBoxTracking(Inference):
            video_id=video_interface.video_id,
        )

-        api.logger.info("Start tracking.")
-
        predictions = []
-        for input_geom in input_bboxes:
+        frames_n = video_interface.frames_count
+        box_n = len(input_bboxes)
+        geom_t = time.monotonic()
+        api.logger.info(
+            "Start tracking.",
+            extra={
+                "video_id": video_interface.video_id,
+                "frame_range": range_of_frames,
+                "geometries_count": box_n,
+                "frames_count": frames_n,
+                "request_uuid": request_uuid,
+            },
+        )
+        for box_i, input_geom in enumerate(input_bboxes, 1):
            input_bbox = input_geom["data"]
            bbox = sly.Rectangle.from_json(input_bbox)
            predictions_for_object = []
            init = False
-            for _ in video_interface.frames_loader_generator():
+            frame_t = time.monotonic()
+            for frame_i, _ in enumerate(video_interface.frames_loader_generator(), 1):
                imgs = video_interface.frames
                target = PredictionBBox(
                    "",  # TODO: can this be useful?
@@ -224,10 +238,40 @@
                predictions_for_object.append(
                    {"type": sly_geometry.geometry_name(), "data": sly_geometry.to_json()}
                )
+                api.logger.debug(
+                    "Frame processed. Geometry: [%d / %d]. Frame: [%d / %d]",
+                    box_i,
+                    box_n,
+                    frame_i,
+                    frames_n,
+                    extra={
+                        "geometry_index": box_i,
+                        "frame_index": frame_i,
+                        "processing_time": time.monotonic() - frame_t,
+                        "request_uuid": request_uuid,
+                    },
+                )
+                frame_t = time.monotonic()
+
            predictions.append(predictions_for_object)
+            api.logger.info(
+                "Geometry processed. Progress: [%d / %d]",
+                box_i,
+                box_n,
+                extra={
+                    "geometry_index": box_i,
+                    "processing_time": time.monotonic() - geom_t,
+                    "request_uuid": request_uuid,
+                },
+            )
+            geom_t = time.monotonic()

        # predictions must be NxK bboxes: N=number of frames, K=number of objects
        predictions = list(map(list, zip(*predictions)))
+        api.logger.info(
+            "Tracking finished.",
+            extra={"tracking_time": time.monotonic() - track_t, "request_uuid": request_uuid},
+        )
        return predictions

    def _inference(self, frames: List[np.ndarray], geometries: List[Geometry], settings: dict):
@@ -322,8 +366,19 @@

        @server.post("/track-api")
        def track_api(request: Request):
-            sly.logger.info("Start tracking.")
-            return self._track_api(request.state.api, request.state.context)
+            inference_request_uuid = uuid.uuid5(
+                namespace=uuid.NAMESPACE_URL, name=f"{time.time()}"
+            ).hex
+            sly.logger.info(
+                "Received track-api request.", extra={"request_uuid": inference_request_uuid}
+            )
+            result = self._track_api(
+                request.state.api, request.state.context, request_uuid=inference_request_uuid
+            )
+            sly.logger.info(
+                "Track-api request processed.", extra={"request_uuid": inference_request_uuid}
+            )
+            return result

        @server.post("/track-api-files")
        def track_api_files(
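Every /track-api call now gets its own request_uuid, which is attached to the extra payload of the start/finish log records and threaded through _track_api. The id scheme itself is plain standard-library code, mirrored here as a sketch:

    import time
    import uuid

    # Same scheme as the handler above: a UUIDv5 name-hash of the current timestamp, hex-encoded.
    request_uuid = uuid.uuid5(namespace=uuid.NAMESPACE_URL, name=f"{time.time()}").hex
    # Filtering log records by this value in their extra fields groups everything from one request.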
@@ -73,7 +73,7 @@ class TrackerInterface:

        self.geometries[start_fig] = geometries[-1]

-    def frames_loader_generator(self, batch_size=16) -> Generator[None, None, None]:
+    def frames_loader_generator(self, batch_size=4) -> Generator[None, None, None]:
        if self.load_all_frames:
            self._cur_frames_indexes = self.frames_indexes
            yield
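frames_loader_generator now loads frames in batches of 4 by default instead of 16, presumably to reduce peak memory use during tracking. Callers that depend on the previous batching can pass the old value explicitly; a sketch assuming tracker is an existing TrackerInterface:

    # Keep the pre-6.73.262 batching behaviour after the default changed from 16 to 4.
    for _ in tracker.frames_loader_generator(batch_size=16):
        frames = tracker.frames  # process the batch currently held by the interface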
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.260
+Version: 6.73.262
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely
@@ -250,7 +250,7 @@ supervisely/app/widgets/empty/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5N
 supervisely/app/widgets/empty/empty.py,sha256=fCr8I7CQ2XLo59bl2txjDrblOGiu0TzUcM-Pq6s7gKY,1285
 supervisely/app/widgets/empty/template.html,sha256=aDBKkin5aLuqByzNN517-rTYCGIg5SPKgnysYMPYjv8,40
 supervisely/app/widgets/experiment_selector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/app/widgets/experiment_selector/experiment_selector.py,sha256=XZxhpD3utmHsHhKxo34-wJIfOaKXXznrFQVnVaHVWik,19456
+supervisely/app/widgets/experiment_selector/experiment_selector.py,sha256=D1RrMuA6xXg8M5DyBUKro-AUqj3oFNMIlo6pZUR0SQY,19889
 supervisely/app/widgets/experiment_selector/style.css,sha256=-zPPXHnJvatYj_xVVAb7T8uoSsUTyhm5xCKWkkFQ78E,548
 supervisely/app/widgets/experiment_selector/template.html,sha256=k7f_Xl6nDUXXwu6IY_RblYni5TbZRRxCBduY5O_SyFs,2908
 supervisely/app/widgets/fast_table/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -386,7 +386,7 @@ supervisely/app/widgets/pagination/template.html,sha256=1z9pt2SOTjA5Kmt8YjSiyO8X
 supervisely/app/widgets/pie_chart/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/app/widgets/pie_chart/pie_chart.py,sha256=E0erw7kPXiudJzNhWMKdOnMgK9u6yYlPw10noKoO2jw,7809
 supervisely/app/widgets/pretrained_models_selector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/app/widgets/pretrained_models_selector/pretrained_models_selector.py,sha256=h6R5bFa1BNCqskFKV9yMGzhY4rf1ZT3ZBh8MKfG87Ko,12394
+supervisely/app/widgets/pretrained_models_selector/pretrained_models_selector.py,sha256=y21fRcDWuP1RbqxNzy3MRxWFi8-f9pV0SsDaNIWqlvE,14102
 supervisely/app/widgets/pretrained_models_selector/style.css,sha256=po3FssuZhg3lKFU3VcTLqTW-qTCXLDnxYi2lCtYXhBc,363
 supervisely/app/widgets/pretrained_models_selector/template.html,sha256=4owO7X84Ii35vMioeTOwMAdR9OwlvLFe19MNPDX4kWM,5170
 supervisely/app/widgets/project_selector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -718,7 +718,7 @@ supervisely/nn/prediction_dto.py,sha256=8QQE6h_feOf3bjWtyG_PoU8FIQrr4g8PoMOyoscm
 supervisely/nn/task_type.py,sha256=UJvSJ4L3I08j_e6sU6Ptu7kS5p1H09rfhfoDUSZ2iys,522
 supervisely/nn/utils.py,sha256=-Xjv5KLu8CTtyi7acqsIX1E0dDwKZPED4D6b4Z_Ln3k,1451
 supervisely/nn/artifacts/__init__.py,sha256=m7KYTMzEJnoV9wcU_0xzgLuPz69Dqp9va0fP32tohV4,576
-supervisely/nn/artifacts/artifacts.py,sha256=SWVwaE40SPNElLp2NsRSOVh_ESRIXXZ1I-tMLEUYmTQ,20751
+supervisely/nn/artifacts/artifacts.py,sha256=mizJzrzAMoURYnHU-Ap8OdUAxRngDtR5PC0RyDxBp8k,21079
 supervisely/nn/artifacts/detectron2.py,sha256=6iu5Yslc-SxCKJVNl6sn84qeXmD-JIQShJIxuLdzf2o,1673
 supervisely/nn/artifacts/hrda.py,sha256=3DzbjDIt9YuLozMrKmYYw13oxc14xju2vzbxKVq2G4I,1073
 supervisely/nn/artifacts/mmclassification.py,sha256=M0m9HHF5koHsl5RuFkRU0_clckA1sFty3X6awB2eKNo,1527
@@ -858,8 +858,8 @@ supervisely/nn/benchmark/visualization/widgets/sidebar/sidebar.py,sha256=tKPURRS
 supervisely/nn/benchmark/visualization/widgets/table/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/visualization/widgets/table/table.py,sha256=atmDnF1Af6qLQBUjLhK18RMDKAYlxnsuVHMSEa5a-e8,4319
 supervisely/nn/inference/__init__.py,sha256=mtEci4Puu-fRXDnGn8RP47o97rv3VTE0hjbYO34Zwqg,1622
-supervisely/nn/inference/cache.py,sha256=KvzCgMbEBLdiJAxJDLicIPKAlYb52P9_kpNPWfiVY8Y,28194
-supervisely/nn/inference/inference.py,sha256=1QLeQmAmo29LoM9uFQjI5iDcuOyoltL4QR2Han9f2MM,128420
+supervisely/nn/inference/cache.py,sha256=_pPSpkl8Wkqkiidn0vu6kWE19cngd80av--jncHxMEQ,30510
+supervisely/nn/inference/inference.py,sha256=8MrOen2oyYIKiVqy0WbBTwABJZss9MLQ70EwX0e_-es,128895
 supervisely/nn/inference/session.py,sha256=jmkkxbe2kH-lEgUU6Afh62jP68dxfhF5v6OGDfLU62E,35757
 supervisely/nn/inference/video_inference.py,sha256=8Bshjr6rDyLay5Za8IB8Dr6FURMO2R_v7aELasO8pR4,5746
 supervisely/nn/inference/gui/__init__.py,sha256=wCxd-lF5Zhcwsis-wScDA8n1Gk_1O00PKgDviUZ3F1U,221
@@ -892,13 +892,13 @@ supervisely/nn/inference/salient_object_segmentation/salient_object_segmentation
 supervisely/nn/inference/semantic_segmentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/inference/semantic_segmentation/semantic_segmentation.py,sha256=xpmViSYm1v_ZxlYyqiD_DiB7_LEynv9ZoU0t2QHEx8A,3370
 supervisely/nn/inference/tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/nn/inference/tracking/bbox_tracking.py,sha256=URWGoYrCRN3yC4MyLx2_eZziZYwX6mPA8OLJqUOsAgA,15307
+supervisely/nn/inference/tracking/bbox_tracking.py,sha256=EBEwLczXugCSYaIUFxB33h7SOIkPDEBWLhr_TbASNOA,17441
 supervisely/nn/inference/tracking/functional.py,sha256=LpVu2gvOOpr9D_uvwTPZey1wUCAhV-E20RPKmCSIrK4,1774
 supervisely/nn/inference/tracking/mask_tracking.py,sha256=qL9eUSqhzJwJMYaAzXX31oOu9EgdnGbsNwK9pOlV148,19610
 supervisely/nn/inference/tracking/object_tracking_3d.py,sha256=Kqvx1qe1G8F1VtdBiy2HJ251rJU6s3LWhj0ZedhrmUw,4327
 supervisely/nn/inference/tracking/point_tracking.py,sha256=Dweiq3dJUuwlFYnJbyx28L3IisNeg-1KQf2mBHrr7yI,22050
 supervisely/nn/inference/tracking/tracker3d_interface.py,sha256=7yIkNO9rgkzQuyXUUccLwqlv5k7RPbxTqz9uI4FylLE,2781
-supervisely/nn/inference/tracking/tracker_interface.py,sha256=MJFzSpQvhC0F-HoryRuoDinO_5xS5oFvT5zDE94ZyKY,10767
+supervisely/nn/inference/tracking/tracker_interface.py,sha256=FXI9f0I5Tb5HN7l8fvxJ5wJ-QYuKyxfXiDpfXRLsSq4,10766
 supervisely/nn/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/legacy/config.py,sha256=TKdyGtURJKzKoyydCZAfujoUnbC0SO8GeVLTSnoyS_w,2994
 supervisely/nn/legacy/dataset.py,sha256=-56EI6OYbkTWx4y8hOgD76y47zUoJNjGFyZ6JaP8iqg,6055
@@ -1057,9 +1057,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.260.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.260.dist-info/METADATA,sha256=CPwcqKvIAjMyeGM6DRLQKHF3owWEcixltIqVe4T0x-E,33573
-supervisely-6.73.260.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-supervisely-6.73.260.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.260.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.260.dist-info/RECORD,,
+supervisely-6.73.262.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.262.dist-info/METADATA,sha256=aMuL624leouIzMWfrL4bx-4A0hXHCT1hTTSHd9YYVbw,33573
+supervisely-6.73.262.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+supervisely-6.73.262.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.262.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.262.dist-info/RECORD,,