supervisely 6.73.447__py3-none-any.whl → 6.73.449__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
--- a/supervisely/app/widgets/dialog/dialog.py
+++ b/supervisely/app/widgets/dialog/dialog.py
@@ -28,6 +28,8 @@ class Dialog(Widget):
         dialog = Dialog(title="Dialog title", content=Input("Input"), size="large")
         dialog.show()
     """
+    class Routes:
+        ON_CLOSE = "close_cb"
 
     def __init__(
         self,
@@ -41,6 +43,16 @@ class Dialog(Widget):
         self._size = size
         super().__init__(widget_id=widget_id, file_path=__file__)
 
+        server = self._sly_app.get_server()
+        route = self.get_route_path(Dialog.Routes.ON_CLOSE)
+        @server.post(route)
+        def _on_close():
+            # * Change visibility state to False when dialog is closed on client side
+            visible = StateJson()[self.widget_id]["visible"]
+            if visible is True:
+                StateJson()[self.widget_id]["visible"] = False
+            # * no need to call send_changes(), as it is already changed on client side
+
     def get_json_data(self) -> Dict[str, str]:
         """Returns dictionary with widget data, which defines the appearance and behavior of the widget.
 
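The new handler follows the SDK's per-widget route pattern: get_route_path() builds a URL under the widget's id, and the FastAPI server returned by self._sly_app.get_server() registers a POST endpoint there. A minimal sketch of that pattern outside the diff (the ClosableWidget class is hypothetical; StateJson, get_route_path, and get_server are the names used above, and the import paths are assumptions):

    from supervisely.app.content import StateJson  # assumed import path
    from supervisely.app.widgets import Widget     # assumed import path

    class ClosableWidget(Widget):  # hypothetical example widget
        class Routes:
            ON_CLOSE = "close_cb"  # route suffix, as in Dialog

        def __init__(self, widget_id=None):
            super().__init__(widget_id=widget_id, file_path=__file__)
            server = self._sly_app.get_server()
            route = self.get_route_path(ClosableWidget.Routes.ON_CLOSE)

            @server.post(route)
            def _on_close():
                # Mirror the client's change; the browser already updated its
                # own state, so no send_changes() round trip is needed.
                if StateJson()[self.widget_id]["visible"] is True:
                    StateJson()[self.widget_id]["visible"] = False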
--- a/supervisely/app/widgets/dialog/template.html
+++ b/supervisely/app/widgets/dialog/template.html
@@ -2,8 +2,9 @@
   :title=data.{{{widget.widget_id}}}.title
   :size=data.{{{widget.widget_id}}}.size
   :visible.sync="state.{{{widget.widget_id}}}.visible"
+  @close="post('/{{{widget.widget_id}}}/close_cb');"
 >
   <div>
     {{{widget._content}}}
   </div>
-</el-dialog>
+</el-dialog>
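Together with the template change, the round trip is: the Element UI dialog fires @close, the template POSTs to /<widget_id>/close_cb, and _on_close flips the server-side "visible" flag. This covers the case where the user dismisses the dialog in the browser while the Python side still holds visible=True, so a later show() would appear to do nothing. A usage sketch, assuming a running Supervisely app context:

    from supervisely.app.widgets import Dialog, Input

    dialog = Dialog(title="Dialog title", content=Input("Input"), size="large")
    dialog.show()
    # User clicks the close button in the browser:
    #   @close -> POST /<widget_id>/close_cb -> visible becomes False on the server,
    # so the next dialog.show() reopens the dialog as expected.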
--- a/supervisely/nn/inference/inference.py
+++ b/supervisely/nn/inference/inference.py
@@ -1105,31 +1105,37 @@ class Inference:
         self.model_precision = deploy_params.get("model_precision", ModelPrecision.FP32)
         self._hardware = get_hardware_info(self.device)
 
-        checkpoint_path = deploy_params["model_files"]["checkpoint"]
-        checkpoint_ext = sly_fs.get_file_ext(checkpoint_path)
-        if self.runtime == RuntimeType.TENSORRT and checkpoint_ext == ".engine":
-            try:
-                self.load_model(**deploy_params)
-            except Exception as e:
-                logger.warning(f"Failed to load model with TensorRT. Downloading PyTorch to export to TensorRT. Error: {repr(e)}")
-                checkpoint_path = self._fallback_download_custom_model_pt(deploy_params)
-                deploy_params["model_files"]["checkpoint"] = checkpoint_path
-                logger.info("Exporting PyTorch model to TensorRT...")
-                self._remove_exported_checkpoints(checkpoint_path)
-                checkpoint_path = self.export_tensorrt(deploy_params)
+        model_files = deploy_params.get("model_files", None)
+        if model_files is not None:
+            checkpoint_path = deploy_params["model_files"]["checkpoint"]
+            checkpoint_ext = sly_fs.get_file_ext(checkpoint_path)
+            if self.runtime == RuntimeType.TENSORRT and checkpoint_ext == ".engine":
+                try:
+                    self.load_model(**deploy_params)
+                except Exception as e:
+                    logger.warning(
+                        f"Failed to load model with TensorRT. Downloading PyTorch to export to TensorRT. Error: {repr(e)}"
+                    )
+                    checkpoint_path = self._fallback_download_custom_model_pt(deploy_params)
+                    deploy_params["model_files"]["checkpoint"] = checkpoint_path
+                    logger.info("Exporting PyTorch model to TensorRT...")
+                    self._remove_exported_checkpoints(checkpoint_path)
+                    checkpoint_path = self.export_tensorrt(deploy_params)
+                    deploy_params["model_files"]["checkpoint"] = checkpoint_path
+                    self.load_model(**deploy_params)
+            if checkpoint_ext in (".pt", ".pth") and not self.runtime == RuntimeType.PYTORCH:
+                if self.runtime == RuntimeType.ONNXRUNTIME:
+                    logger.info("Exporting PyTorch model to ONNX...")
+                    self._remove_exported_checkpoints(checkpoint_path)
+                    checkpoint_path = self.export_onnx(deploy_params)
+                elif self.runtime == RuntimeType.TENSORRT:
+                    logger.info("Exporting PyTorch model to TensorRT...")
+                    self._remove_exported_checkpoints(checkpoint_path)
+                    checkpoint_path = self.export_tensorrt(deploy_params)
                 deploy_params["model_files"]["checkpoint"] = checkpoint_path
                 self.load_model(**deploy_params)
-        if checkpoint_ext in (".pt", ".pth") and not self.runtime == RuntimeType.PYTORCH:
-            if self.runtime == RuntimeType.ONNXRUNTIME:
-                logger.info("Exporting PyTorch model to ONNX...")
-                self._remove_exported_checkpoints(checkpoint_path)
-                checkpoint_path = self.export_onnx(deploy_params)
-            elif self.runtime == RuntimeType.TENSORRT:
-                logger.info("Exporting PyTorch model to TensorRT...")
-                self._remove_exported_checkpoints(checkpoint_path)
-                checkpoint_path = self.export_tensorrt(deploy_params)
-            deploy_params["model_files"]["checkpoint"] = checkpoint_path
-            self.load_model(**deploy_params)
+            else:
+                self.load_model(**deploy_params)
         else:
             self.load_model(**deploy_params)
 
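The restructured branch reads as a decision table: with no "model_files" entry in deploy_params the model is loaded as-is; an .engine checkpoint under the TensorRT runtime is tried first, with a fallback that downloads the PyTorch checkpoint and re-exports; a .pt/.pth checkpoint under a non-PyTorch runtime is exported to ONNX or TensorRT before loading. A hypothetical restatement of just that dispatch (the function name and return strings are illustrative, not SDK API):

    def describe_load_plan(deploy_params: dict, runtime: str) -> str:
        # Illustrative only: summarizes the branching in Inference above.
        model_files = deploy_params.get("model_files", None)
        if model_files is None:
            return "load as-is"
        ext = "." + model_files["checkpoint"].rsplit(".", 1)[-1]
        if runtime == "TensorRT" and ext == ".engine":
            return "try engine; on failure download PyTorch and re-export"
        if ext in (".pt", ".pth") and runtime != "PyTorch":
            return "export to ONNX" if runtime == "ONNXRuntime" else "export to TensorRT"
        return "load as-is"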
--- a/supervisely/nn/training/gui/train_val_splits_selector.py
+++ b/supervisely/nn/training/gui/train_val_splits_selector.py
@@ -180,7 +180,13 @@ class TrainValSplitsSelector:
             return False
 
         # Check if datasets are not empty
-        filters = [{ ApiField.FIELD: ApiField.ID, ApiField.OPERATOR: "in", ApiField.VALUE: train_dataset_id + val_dataset_id}]
+        filters = [
+            {
+                ApiField.FIELD: ApiField.ID,
+                ApiField.OPERATOR: "in",
+                ApiField.VALUE: train_dataset_id + val_dataset_id,
+            }
+        ]
         selected_datasets = self.api.dataset.get_list(self.project_id, filters, recursive=True)
         datasets_count = {}
         for dataset in selected_datasets:
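The reformatted block is behavior-neutral: it is still a single "in" condition over the combined train and val dataset ids, in the list-of-conditions shape that dataset.get_list() accepts. The same call in isolation (the ids and the api/project_id variables are made up; the ApiField import path is an assumption):

    from supervisely.api.module_api import ApiField  # assumed import path

    train_dataset_id, val_dataset_id = [101], [102]  # hypothetical ids
    filters = [
        {
            ApiField.FIELD: ApiField.ID,
            ApiField.OPERATOR: "in",
            ApiField.VALUE: train_dataset_id + val_dataset_id,
        }
    ]
    # selected_datasets = api.dataset.get_list(project_id, filters, recursive=True)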
@@ -334,6 +340,7 @@ class TrainValSplitsSelector:
 
     def _detect_splits(self, collections_split: bool, datasets_split: bool) -> bool:
         """Detect splits based on the selected method"""
+        self._parse_collections()
         splits_found = False
         if collections_split:
             splits_found = self._detect_collections()
@@ -341,47 +348,59 @@ class TrainValSplitsSelector:
             splits_found = self._detect_datasets()
         return splits_found
 
+    def _parse_collections(self) -> None:
+        """Parse collections with train and val prefixes and set them to train_val_splits variables"""
+        all_collections = self.api.entities_collection.get_list(self.project_id)
+        existing_train_collections = [
+            collection for collection in all_collections if collection.name.startswith("train_")
+        ]
+        existing_val_collections = [
+            collection for collection in all_collections if collection.name.startswith("val_")
+        ]
+
+        self._all_train_collections = existing_train_collections
+        self._all_val_collections = existing_val_collections
+        self._latest_train_collection = self._get_latest_collection(existing_train_collections, "train")
+        self._latest_val_collection = self._get_latest_collection(existing_val_collections, "val")
+
+    def _get_latest_collection(
+        self, collections: List[EntitiesCollectionInfo], expected_prefix: str
+    ) -> EntitiesCollectionInfo:
+        curr_collection = None
+        curr_idx = 0
+        for collection in collections:
+            parts = collection.name.split("_")
+            if len(parts) == 2:
+                prefix = parts[0].lower()
+                if prefix == expected_prefix:
+                    if parts[1].isdigit():
+                        collection_idx = int(parts[1])
+                        if collection_idx > curr_idx:
+                            curr_idx = collection_idx
+                            curr_collection = collection
+        return curr_collection
+
+
     def _detect_collections(self) -> bool:
         """Find collections with train and val prefixes and set them to train_val_splits"""
-        def _get_latest_collection(collections: List[EntitiesCollectionInfo]) -> EntitiesCollectionInfo:
-            curr_collection = None
-            curr_idx = 0
-            for collection in collections:
-                collection_idx = int(collection.name.rsplit('_', 1)[-1])
-                if collection_idx > curr_idx:
-                    curr_idx = collection_idx
-                    curr_collection = collection
-            return curr_collection
 
-        all_collections = self.api.entities_collection.get_list(self.project_id)
-        train_collections = []
-        val_collections = []
         collections_found = False
-        for collection in all_collections:
-            if collection.name.lower().startswith("train_"):
-                train_collections.append(collection)
-            elif collection.name.lower().startswith("val_"):
-                val_collections.append(collection)
-
-        train_collection = _get_latest_collection(train_collections)
-        val_collection = _get_latest_collection(val_collections)
-        if train_collection is not None and val_collection is not None:
-            self.train_val_splits.set_collections_splits([train_collection.id], [val_collection.id])
+        if self._latest_train_collection is not None and self._latest_val_collection is not None:
+            self.train_val_splits.set_collections_splits(
+                [self._latest_train_collection.id], [self._latest_val_collection.id]
+            )
             self.validator_text = Text("Train and val collections are detected", status="info")
             self.validator_text.show()
             collections_found = True
-            self._all_train_collections = train_collections
-            self._all_val_collections = val_collections
-            self._latest_train_collection = train_collection
-            self._latest_val_collection = val_collection
         else:
            self.validator_text = Text("")
            self.validator_text.hide()
            collections_found = False
         return collections_found
-
+
     def _detect_datasets(self) -> bool:
         """Find datasets with train and val prefixes and set them to train_val_splits"""
+
         def _extend_with_nested(root_ds):
             nested = self.api.dataset.get_nested(self.project_id, root_ds.id)
             nested_ids = [ds.id for ds in nested]
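Promoting _get_latest_collection to a method also tightens its parsing. The old nested helper called int(name.rsplit('_', 1)[-1]) on every matching collection, so a name like "train_final" raised ValueError; the new version only accepts names of the exact form <prefix>_<digits> and keeps the highest index. A self-contained sketch of the new rule on hypothetical names:

    from collections import namedtuple

    # Stand-in for EntitiesCollectionInfo; only .id and .name are used here.
    Collection = namedtuple("Collection", ["id", "name"])

    def latest(collections, expected_prefix):
        # Mirrors _get_latest_collection: accept "<prefix>_<digits>", keep max index.
        best, best_idx = None, 0
        for c in collections:
            parts = c.name.split("_")
            if len(parts) == 2 and parts[0].lower() == expected_prefix and parts[1].isdigit():
                idx = int(parts[1])
                if idx > best_idx:
                    best, best_idx = c, idx
        return best

    cols = [Collection(1, "train_2"), Collection(2, "train_10"), Collection(3, "train_final")]
    assert latest(cols, "train").name == "train_10"  # "train_final" is skipped, not a crash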
@@ -407,7 +426,9 @@ class TrainValSplitsSelector:
         val_count = len(train_val_dataset_ids["val"])
 
         if train_count > 0 and val_count > 0:
-            self.train_val_splits.set_datasets_splits(train_val_dataset_ids["train"], train_val_dataset_ids["val"])
+            self.train_val_splits.set_datasets_splits(
+                train_val_dataset_ids["train"], train_val_dataset_ids["val"]
+            )
             datasets_found = True
 
         if train_count > 0 and val_count > 0:
@@ -415,7 +436,7 @@ class TrainValSplitsSelector:
                 message = "train and val datasets are detected"
             else:
                 message = "Multiple train and val datasets are detected. Check manually if selection is correct"
-
+
             self.validator_text = Text(message, status="info")
             self.validator_text.show()
             datasets_found = True
@@ -423,4 +444,4 @@ class TrainValSplitsSelector:
             self.validator_text = Text("")
             self.validator_text.hide()
             datasets_found = False
-        return datasets_found
+        return datasets_found
--- supervisely-6.73.447.dist-info/METADATA
+++ supervisely-6.73.449.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.447
+Version: 6.73.449
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely
--- supervisely-6.73.447.dist-info/RECORD
+++ supervisely-6.73.449.dist-info/RECORD
@@ -230,8 +230,8 @@ supervisely/app/widgets/destination_project/__init__.py,sha256=47DEQpj8HBSa-_TIm
 supervisely/app/widgets/destination_project/destination_project.py,sha256=zSFrCfvFMbeZY4uC3FRz-EkEpuqU24Mq-KqAckOc184,5476
 supervisely/app/widgets/destination_project/template.html,sha256=AhhHte0PDFwbYRs06Q824gz3uy68ouVur_KM4FaCWtE,5526
 supervisely/app/widgets/dialog/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/app/widgets/dialog/dialog.py,sha256=Pg1Lk3Hh1sLYyX06sbvLvVhyTWHvR92Vy9aqOR3UPwQ,3023
-supervisely/app/widgets/dialog/template.html,sha256=OGakbpPL9KKVHL-_entT5g_4RTiW0xxJiBPjsPy3V9k,207
+supervisely/app/widgets/dialog/dialog.py,sha256=Uc7K6wLv4HYUiISbmyNZnLsyjDMYgZ2P5NedhniXl8I,3563
+supervisely/app/widgets/dialog/template.html,sha256=ccYatR1_h9MktDpGUHNTw88_Hihw7Wsxl0k4l7r54WQ,261
 supervisely/app/widgets/done_label/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/app/widgets/done_label/done_label.py,sha256=7IMLbt4v-six26QNfuJUg2CzvL-lMiarEMCF1Fwywsg,1944
 supervisely/app/widgets/done_label/template.html,sha256=PShF4P5x4InVFYAJUPRdMfjY7lJGSfaAFUAG8chxllo,238
@@ -904,7 +904,7 @@ supervisely/nn/benchmark/visualization/widgets/table/__init__.py,sha256=47DEQpj8
 supervisely/nn/benchmark/visualization/widgets/table/table.py,sha256=atmDnF1Af6qLQBUjLhK18RMDKAYlxnsuVHMSEa5a-e8,4319
 supervisely/nn/inference/__init__.py,sha256=QFukX2ip-U7263aEPCF_UCFwj6EujbMnsgrXp5Bbt8I,1623
 supervisely/nn/inference/cache.py,sha256=rfmb1teJ9lNDfisUSh6bwDCVkPZocn8GMvDgLQktnbo,35023
-supervisely/nn/inference/inference.py,sha256=p1yUIB85nTOmwVJbqzoU_DLTVcUHrJiF18bqB1I9IrA,207202
+supervisely/nn/inference/inference.py,sha256=71E5_DG5He2y9TWu2YN8P9MWz0EwldGJa_7pK6pVp7E,207512
 supervisely/nn/inference/inference_request.py,sha256=1Tq-OV7bYtr0bKDqvBXh72wpR5Misgk-iQn5waCxtqo,14830
 supervisely/nn/inference/session.py,sha256=f2Tyvj21oO9AKxqr6_yHZ81Ol-wXC-h5cweTHEoljkg,35796
 supervisely/nn/inference/uploader.py,sha256=Dn5MfMRq7tclEWpP0B9fJjTiQPBpwumfXxC8-lOYgnM,5659
@@ -1020,7 +1020,7 @@ supervisely/nn/training/gui/hyperparameters_selector.py,sha256=tEyppV5ay7nECi6qB
 supervisely/nn/training/gui/input_selector.py,sha256=rmirJzpdxuYONI6y5_cvMdGWBJ--T20YTsISghATHu4,2510
 supervisely/nn/training/gui/model_selector.py,sha256=YKBAk6MheulFEl9TF9_mVtE3-Hsc0B3LmeOzMiV6AlQ,7487
 supervisely/nn/training/gui/tags_selector.py,sha256=0yg2OGPqiHUBp3iML2vrzTOVeSKtRtR9JoMy4Snx41U,3755
-supervisely/nn/training/gui/train_val_splits_selector.py,sha256=CqVRp3nz6nJ8nk9J-5QiMMYiTsaTCAwLMg-ZqRr0Fto,18866
+supervisely/nn/training/gui/train_val_splits_selector.py,sha256=NWyulmr3lBZU1tcT_2HXXVIag-vpQjSjzNvtqJ_f2kw,19409
 supervisely/nn/training/gui/training_artifacts.py,sha256=ZyTnB9PyhwsqGAANwnpyLriJtAb4p0f03Yhmm_jkfIE,10946
 supervisely/nn/training/gui/training_logs.py,sha256=GgEQMj9p98Z3p2b_-3BkHOhY7WQYELxctsRKmkbg3JY,4966
 supervisely/nn/training/gui/training_process.py,sha256=XJ3ELyys_rBFmLQnI9qe3QhmfZ6U0CrK1FbI6d-Fbns,3664
@@ -1127,9 +1127,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=yRwzEQmVwSd6lUQoAUdBngKEOlnoQ6hA9ZcoZGJRNC4,331
-supervisely-6.73.447.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.447.dist-info/METADATA,sha256=Fb3qYfdHQT27KPMPPY3g82sbHQOyxe_ApnyLfPYeUHg,35480
-supervisely-6.73.447.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
-supervisely-6.73.447.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.447.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.447.dist-info/RECORD,,
+supervisely-6.73.449.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.449.dist-info/METADATA,sha256=d65TCju-7Q_u75UWtDWAgTcIRszBoX5r8KPONLcp0Bo,35480
+supervisely-6.73.449.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+supervisely-6.73.449.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.449.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.449.dist-info/RECORD,,