supervisely 6.73.278__py3-none-any.whl → 6.73.279__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of supervisely might be problematic. Click here for more details.

@@ -11,6 +11,9 @@ import supervisely.io.env as sly_env
11
11
  from supervisely import Api, ProjectMeta
12
12
  from supervisely._utils import is_production
13
13
  from supervisely.app.widgets import Stepper, Widget
14
+ from supervisely.geometry.polygon import Polygon
15
+ from supervisely.geometry.rectangle import Rectangle
16
+ from supervisely.nn.task_type import TaskType
14
17
  from supervisely.nn.training.gui.classes_selector import ClassesSelector
15
18
  from supervisely.nn.training.gui.hyperparameters_selector import HyperparametersSelector
16
19
  from supervisely.nn.training.gui.input_selector import InputSelector
@@ -137,6 +140,26 @@ class TrainGUI:
137
140
  return
138
141
  self.training_process.set_experiment_name(experiment_name)
139
142
 
143
+ def need_convert_class_shapes() -> bool:
144
+ task_type = self.model_selector.get_selected_task_type()
145
+
146
+ def _need_convert(shape):
147
+ if task_type == TaskType.OBJECT_DETECTION:
148
+ return shape != Rectangle.geometry_name()
149
+ elif task_type in [TaskType.INSTANCE_SEGMENTATION, TaskType.SEMANTIC_SEGMENTATION]:
150
+ return shape == Polygon.geometry_name()
151
+ return
152
+
153
+ data = self.classes_selector.classes_table._table_data
154
+ selected_classes = set(self.classes_selector.classes_table.get_selected_classes())
155
+ empty = set(r[0]["data"] for r in data if r[2]["data"] == 0 and r[3]["data"] == 0)
156
+ need_convert = set(r[0]["data"] for r in data if _need_convert(r[1]["data"]))
157
+
158
+ if need_convert.intersection(selected_classes - empty):
159
+ self.hyperparameters_selector.model_benchmark_auto_convert_warning.show()
160
+ else:
161
+ self.hyperparameters_selector.model_benchmark_auto_convert_warning.hide()
162
+
140
163
  # ------------------------------------------------- #
141
164
 
142
165
  # Wrappers
@@ -168,7 +191,7 @@ class TrainGUI:
168
191
  callback=self.hyperparameters_selector_cb,
169
192
  validation_text=self.model_selector.validator_text,
170
193
  validation_func=self.model_selector.validate_step,
171
- on_select_click=[set_experiment_name],
194
+ on_select_click=[set_experiment_name, need_convert_class_shapes],
172
195
  collapse_card=(self.model_selector.card, self.collapsable),
173
196
  )
174
197
 
@@ -276,6 +299,7 @@ class TrainGUI:
276
299
  @self.hyperparameters_selector.run_model_benchmark_checkbox.value_changed
277
300
  def show_mb_speedtest(is_checked: bool):
278
301
  self.hyperparameters_selector.toggle_mb_speedtest(is_checked)
302
+ need_convert_class_shapes()
279
303
 
280
304
  # ------------------------------------------------- #
281
305
 
@@ -44,8 +44,18 @@ class HyperparametersSelector:
44
44
  self.model_benchmark_learn_more = Text(
45
45
  f"Learn more about Model Benchmark in the {docs_link}.", status="info"
46
46
  )
47
+ self.model_benchmark_auto_convert_warning = Text(
48
+ text="Project will be automatically converted according to CV task for Model Evaluation.",
49
+ status="warning",
50
+ )
51
+ self.model_benchmark_auto_convert_warning.hide()
52
+
47
53
  self.display_widgets.extend(
48
- [self.model_benchmark_field, self.model_benchmark_learn_more]
54
+ [
55
+ self.model_benchmark_field,
56
+ self.model_benchmark_learn_more,
57
+ self.model_benchmark_auto_convert_warning,
58
+ ]
49
59
  )
50
60
  # -------------------------------- #
51
61
 
@@ -12,9 +12,9 @@ from supervisely.app.widgets import (
12
12
  RadioTabs,
13
13
  Text,
14
14
  )
15
+ from supervisely.nn.artifacts.utils import FrameworkMapper
15
16
  from supervisely.nn.experiments import get_experiment_infos
16
17
  from supervisely.nn.utils import ModelSource
17
- from supervisely.nn.artifacts.utils import FrameworkMapper, FrameworkName
18
18
 
19
19
 
20
20
  class ModelSelector:
@@ -37,7 +37,7 @@ class ModelSelector:
37
37
  legacy_experiment_infos = framework_cls.get_list_experiment_info()
38
38
  experiment_infos = experiment_infos + legacy_experiment_infos
39
39
  except:
40
- logger.warn(f"Legacy checkpoints are not available for '{framework}'")
40
+ logger.warning(f"Legacy checkpoints are not available for '{framework}'")
41
41
 
42
42
  self.experiment_selector = ExperimentSelector(self.team_id, experiment_infos)
43
43
  self.model_source_tabs = RadioTabs(
@@ -106,3 +106,9 @@ class ModelSelector:
106
106
  self.validator_text.set(text="Model is selected", status="success")
107
107
  self.validator_text.show()
108
108
  return True
109
+
110
+ def get_selected_task_type(self) -> str:
111
+ if self.get_model_source() == ModelSource.PRETRAINED:
112
+ return self.pretrained_models_table.get_selected_task_type()
113
+ else:
114
+ return self.experiment_selector.get_selected_task_type()
@@ -25,6 +25,7 @@ import supervisely.io.json as sly_json
25
25
  from supervisely import (
26
26
  Api,
27
27
  Application,
28
+ Dataset,
28
29
  DatasetInfo,
29
30
  OpenMode,
30
31
  Project,
@@ -32,6 +33,7 @@ from supervisely import (
32
33
  ProjectMeta,
33
34
  WorkflowMeta,
34
35
  WorkflowSettings,
36
+ batched,
35
37
  download_project,
36
38
  is_development,
37
39
  is_production,
@@ -340,22 +342,6 @@ class TrainApp:
340
342
  """
341
343
  return self.gui.model_selector.get_model_info()
342
344
 
343
- @property
344
- def model_meta(self) -> ProjectMeta:
345
- """
346
- Returns the model metadata.
347
-
348
- :return: Model metadata.
349
- :rtype: dict
350
- """
351
- project_meta_json = self.project_meta.to_json()
352
- model_meta = {
353
- "classes": [
354
- item for item in project_meta_json["classes"] if item["title"] in self.classes
355
- ]
356
- }
357
- return ProjectMeta.from_json(model_meta)
358
-
359
345
  @property
360
346
  def device(self) -> str:
361
347
  """
@@ -505,8 +491,7 @@ class TrainApp:
505
491
  self._download_project()
506
492
  # Step 3. Split Project
507
493
  self._split_project()
508
- # Step 4. Convert Supervisely to X format
509
- # Step 5. Download Model files
494
+ # Step 4. Download Model files
510
495
  self._download_model()
511
496
 
512
497
  def _finalize(self, experiment_info: dict) -> None:
@@ -528,13 +513,14 @@ class TrainApp:
528
513
  experiment_info = self._preprocess_artifacts(experiment_info)
529
514
 
530
515
  # Step3. Postprocess splits
531
- splits_data = self._postprocess_splits()
516
+ train_splits_data = self._postprocess_splits()
532
517
 
533
518
  # Step 3. Upload artifacts
534
519
  self._set_text_status("uploading")
535
520
  remote_dir, file_info = self._upload_artifacts()
536
521
 
537
522
  # Step 4. Run Model Benchmark
523
+ model_meta = self.create_model_meta(experiment_info["task_type"])
538
524
  mb_eval_lnk_file_info, mb_eval_report, mb_eval_report_id, eval_metrics = (
539
525
  None,
540
526
  None,
@@ -543,6 +529,15 @@ class TrainApp:
543
529
  )
544
530
  if self.is_model_benchmark_enabled:
545
531
  try:
532
+ # Convert GT project
533
+ if self._app_options.get("auto_convert_classes", True):
534
+ self._set_text_status("convert_gt_project")
535
+ gt_project_id, bm_splits_data = self._convert_and_split_gt_project(
536
+ experiment_info["task_type"]
537
+ )
538
+ else:
539
+ gt_project_id, bm_splits_data = None, train_splits_data
540
+
546
541
  self._set_text_status("benchmark")
547
542
  (
548
543
  mb_eval_lnk_file_info,
@@ -550,7 +545,12 @@ class TrainApp:
550
545
  mb_eval_report_id,
551
546
  eval_metrics,
552
547
  ) = self._run_model_benchmark(
553
- self.output_dir, remote_dir, experiment_info, splits_data
548
+ self.output_dir,
549
+ remote_dir,
550
+ experiment_info,
551
+ bm_splits_data,
552
+ model_meta,
553
+ gt_project_id,
554
554
  )
555
555
  except Exception as e:
556
556
  logger.error(f"Model benchmark failed: {e}")
@@ -571,8 +571,8 @@ class TrainApp:
571
571
  )
572
572
  self._generate_app_state(remote_dir, experiment_info)
573
573
  self._generate_hyperparameters(remote_dir, experiment_info)
574
- self._generate_train_val_splits(remote_dir, splits_data)
575
- self._generate_model_meta(remote_dir, experiment_info)
574
+ self._generate_train_val_splits(remote_dir, train_splits_data)
575
+ self._generate_model_meta(remote_dir, model_meta)
576
576
  self._upload_demo_files(remote_dir)
577
577
 
578
578
  # Step 7. Set output widgets
@@ -1200,9 +1200,14 @@ class TrainApp:
1200
1200
  logger.debug("Validation successful")
1201
1201
  return True, None
1202
1202
 
1203
- def _postprocess_splits(self) -> dict:
1203
+ def _postprocess_splits(self, project_id: Optional[int] = None) -> dict:
1204
1204
  """
1205
1205
  Processes the train and val splits to generate the necessary data for the experiment_info.json file.
1206
+
1207
+ :param project_id: ID of the ground truth project for model benchmark. Provide only when cv task conversion is required.
1208
+ :type project_id: Optional[int]
1209
+ :return: Splits data.
1210
+ :rtype: dict
1206
1211
  """
1207
1212
  val_dataset_ids = None
1208
1213
  val_images_ids = None
@@ -1212,10 +1217,30 @@ class TrainApp:
1212
1217
  split_method = self.gui.train_val_splits_selector.get_split_method()
1213
1218
  train_set, val_set = self._train_split, self._val_split
1214
1219
  if split_method == "Based on datasets":
1215
- val_dataset_ids = self.gui.train_val_splits_selector.get_val_dataset_ids()
1216
- train_dataset_ids = self.gui.train_val_splits_selector.get_train_dataset_ids
1220
+ if project_id is None:
1221
+ val_dataset_ids = self.gui.train_val_splits_selector.get_val_dataset_ids()
1222
+ train_dataset_ids = self.gui.train_val_splits_selector.get_train_dataset_ids()
1223
+ else:
1224
+ src_datasets_map = {
1225
+ dataset.id: dataset
1226
+ for _, dataset in self._api.dataset.tree(self.project_info.id)
1227
+ }
1228
+ val_dataset_ids = self.gui.train_val_splits_selector.get_val_dataset_ids()
1229
+ train_dataset_ids = self.gui.train_val_splits_selector.get_train_dataset_ids()
1230
+
1231
+ train_dataset_names = [src_datasets_map[ds_id].name for ds_id in train_dataset_ids]
1232
+ val_dataset_names = [src_datasets_map[ds_id].name for ds_id in val_dataset_ids]
1233
+
1234
+ gt_datasets_map = {
1235
+ dataset.name: dataset.id for _, dataset in self._api.dataset.tree(project_id)
1236
+ }
1237
+ train_dataset_ids = [gt_datasets_map[ds_name] for ds_name in train_dataset_names]
1238
+ val_dataset_ids = [gt_datasets_map[ds_name] for ds_name in val_dataset_names]
1217
1239
  else:
1218
- dataset_infos = [dataset for _, dataset in self._api.dataset.tree(self.project_id)]
1240
+ if project_id is None:
1241
+ project_id = self.project_id
1242
+
1243
+ dataset_infos = [dataset for _, dataset in self._api.dataset.tree(project_id)]
1219
1244
  ds_infos_dict = {}
1220
1245
  for dataset in dataset_infos:
1221
1246
  if dataset.parent_id is not None:
@@ -1232,18 +1257,19 @@ class TrainApp:
1232
1257
  image_infos = []
1233
1258
  for dataset_name, image_names in image_names_per_dataset.items():
1234
1259
  ds_info = ds_infos_dict[dataset_name]
1235
- image_infos.extend(
1236
- self._api.image.get_list(
1237
- ds_info.id,
1238
- filters=[
1239
- {
1240
- "field": "name",
1241
- "operator": "in",
1242
- "value": image_names,
1243
- }
1244
- ],
1260
+ for names_batch in batched(image_names, 200):
1261
+ image_infos.extend(
1262
+ self._api.image.get_list(
1263
+ ds_info.id,
1264
+ filters=[
1265
+ {
1266
+ "field": "name",
1267
+ "operator": "in",
1268
+ "value": names_batch,
1269
+ }
1270
+ ],
1271
+ )
1245
1272
  )
1246
- )
1247
1273
  return image_infos
1248
1274
 
1249
1275
  val_image_infos = get_image_infos_by_split(ds_infos_dict, val_set)
@@ -1373,7 +1399,7 @@ class TrainApp:
1373
1399
  f"Uploading '{self._train_val_split_file}' to Team Files",
1374
1400
  )
1375
1401
 
1376
- def _generate_model_meta(self, remote_dir: str, experiment_info: dict) -> None:
1402
+ def _generate_model_meta(self, remote_dir: str, model_meta: ProjectMeta) -> None:
1377
1403
  """
1378
1404
  Generates and uploads the model_meta.json file to the output directory.
1379
1405
 
@@ -1382,17 +1408,31 @@ class TrainApp:
1382
1408
  :param experiment_info: Information about the experiment results.
1383
1409
  :type experiment_info: dict
1384
1410
  """
1385
- # @TODO: Handle tags for classification tasks
1386
1411
  local_path = join(self.output_dir, self._model_meta_file)
1387
1412
  remote_path = join(remote_dir, self._model_meta_file)
1388
1413
 
1389
- sly_json.dump_json_file(self.model_meta.to_json(), local_path)
1414
+ sly_json.dump_json_file(model_meta.to_json(), local_path)
1390
1415
  self._upload_file_to_team_files(
1391
1416
  local_path,
1392
1417
  remote_path,
1393
1418
  f"Uploading '{self._model_meta_file}' to Team Files",
1394
1419
  )
1395
1420
 
1421
+ def create_model_meta(self, task_type: str):
1422
+ """
1423
+ Convert project meta according to task type.
1424
+ """
1425
+ names_to_delete = [
1426
+ c.name for c in self.project_meta.obj_classes if c.name not in self.classes
1427
+ ]
1428
+ model_meta = self.project_meta.delete_obj_classes(names_to_delete)
1429
+
1430
+ if task_type == TaskType.OBJECT_DETECTION:
1431
+ model_meta, _ = model_meta.to_detection_task(True)
1432
+ elif task_type in [TaskType.INSTANCE_SEGMENTATION, TaskType.SEMANTIC_SEGMENTATION]:
1433
+ model_meta, _ = model_meta.to_segmentation_task() # @TODO: check background class
1434
+ return model_meta
1435
+
1396
1436
  def _generate_experiment_info(
1397
1437
  self,
1398
1438
  remote_dir: str,
@@ -1740,6 +1780,8 @@ class TrainApp:
1740
1780
  remote_artifacts_dir: str,
1741
1781
  experiment_info: dict,
1742
1782
  splits_data: dict,
1783
+ model_meta: ProjectInfo,
1784
+ gt_project_id: int = None,
1743
1785
  ) -> tuple:
1744
1786
  """
1745
1787
  Runs the Model Benchmark evaluation process. Model benchmark runs only in production mode.
@@ -1752,6 +1794,10 @@ class TrainApp:
1752
1794
  :type experiment_info: dict
1753
1795
  :param splits_data: Information about the train and val splits.
1754
1796
  :type splits_data: dict
1797
+ :param model_meta: Model meta with object classes.
1798
+ :type model_meta: ProjectInfo
1799
+ :param gt_project_id: Ground truth project ID with converted shapes.
1800
+ :type gt_project_id: int
1755
1801
  :return: Evaluation report, report ID and evaluation metrics.
1756
1802
  :rtype: tuple
1757
1803
  """
@@ -1767,6 +1813,7 @@ class TrainApp:
1767
1813
  supported_task_types = [
1768
1814
  TaskType.OBJECT_DETECTION,
1769
1815
  TaskType.INSTANCE_SEGMENTATION,
1816
+ TaskType.SEMANTIC_SEGMENTATION,
1770
1817
  ]
1771
1818
  task_type = experiment_info["task_type"]
1772
1819
  if task_type not in supported_task_types:
@@ -1807,7 +1854,7 @@ class TrainApp:
1807
1854
  "artifacts_dir": remote_artifacts_dir,
1808
1855
  "model_name": experiment_info["model_name"],
1809
1856
  "framework_name": self.framework_name,
1810
- "model_meta": self.model_meta.to_json(),
1857
+ "model_meta": model_meta.to_json(),
1811
1858
  }
1812
1859
 
1813
1860
  logger.info(f"Deploy parameters: {self._benchmark_params}")
@@ -1827,12 +1874,15 @@ class TrainApp:
1827
1874
  train_images_ids = splits_data["train"]["images_ids"]
1828
1875
 
1829
1876
  bm = None
1877
+ if gt_project_id is None:
1878
+ gt_project_id = self.project_info.id
1879
+
1830
1880
  if task_type == TaskType.OBJECT_DETECTION:
1831
1881
  eval_params = ObjectDetectionEvaluator.load_yaml_evaluation_params()
1832
1882
  eval_params = yaml.safe_load(eval_params)
1833
1883
  bm = ObjectDetectionBenchmark(
1834
1884
  self._api,
1835
- self.project_info.id,
1885
+ gt_project_id,
1836
1886
  output_dir=benchmark_dir,
1837
1887
  gt_dataset_ids=benchmark_dataset_ids,
1838
1888
  gt_images_ids=benchmark_images_ids,
@@ -1846,7 +1896,7 @@ class TrainApp:
1846
1896
  eval_params = yaml.safe_load(eval_params)
1847
1897
  bm = InstanceSegmentationBenchmark(
1848
1898
  self._api,
1849
- self.project_info.id,
1899
+ gt_project_id,
1850
1900
  output_dir=benchmark_dir,
1851
1901
  gt_dataset_ids=benchmark_dataset_ids,
1852
1902
  gt_images_ids=benchmark_images_ids,
@@ -1860,7 +1910,7 @@ class TrainApp:
1860
1910
  eval_params = yaml.safe_load(eval_params)
1861
1911
  bm = SemanticSegmentationBenchmark(
1862
1912
  self._api,
1863
- self.project_info.id,
1913
+ gt_project_id,
1864
1914
  output_dir=benchmark_dir,
1865
1915
  gt_dataset_ids=benchmark_dataset_ids,
1866
1916
  gt_images_ids=benchmark_images_ids,
@@ -2280,6 +2330,7 @@ class TrainApp:
2280
2330
  "metadata",
2281
2331
  "export_onnx",
2282
2332
  "export_trt",
2333
+ "convert_gt_project",
2283
2334
  ],
2284
2335
  ):
2285
2336
 
@@ -2313,6 +2364,8 @@ class TrainApp:
2313
2364
  self.gui.training_process.validator_text.set("Validating experiment...", "info")
2314
2365
  elif status == "metadata":
2315
2366
  self.gui.training_process.validator_text.set("Generating training metadata...", "info")
2367
+ elif status == "convert_gt_project":
2368
+ self.gui.training_process.validator_text.set("Converting GT project...", "info")
2316
2369
 
2317
2370
  def _set_ws_progress_status(
2318
2371
  self,
@@ -2391,3 +2444,54 @@ class TrainApp:
2391
2444
  for runtime, path in export_weights.items()
2392
2445
  }
2393
2446
  return remote_export_weights
2447
+
2448
+ def _convert_and_split_gt_project(self, task_type: str):
2449
+ # 1. Convert GT project to cv task
2450
+ Project.download(
2451
+ self._api, self.project_info.id, "tmp_project", save_images=False, save_image_info=True
2452
+ )
2453
+ project = Project("tmp_project", OpenMode.READ)
2454
+
2455
+ pr_prefix = ""
2456
+ if task_type == TaskType.OBJECT_DETECTION:
2457
+ Project.to_detection_task(project.directory, inplace=True)
2458
+ pr_prefix = "[detection]: "
2459
+ # @TODO: don't convert segmentation?
2460
+ elif (
2461
+ task_type == TaskType.INSTANCE_SEGMENTATION
2462
+ or task_type == TaskType.SEMANTIC_SEGMENTATION
2463
+ ):
2464
+ Project.to_segmentation_task(project.directory, inplace=True)
2465
+ pr_prefix = "[segmentation]: "
2466
+
2467
+ gt_project_info = self._api.project.create(
2468
+ self.workspace_id,
2469
+ f"{pr_prefix}{self.project_info.name}",
2470
+ description=(
2471
+ f"Converted ground truth project for training session: '{self.task_id}'. "
2472
+ f"Original project id: '{self.project_info.id}'. "
2473
+ "Removing this project will affect model benchmark evaluation report."
2474
+ ),
2475
+ change_name_if_conflict=True,
2476
+ )
2477
+
2478
+ # 3. Upload gt project to benchmark workspace
2479
+ project = Project("tmp_project", OpenMode.READ)
2480
+ self._api.project.update_meta(gt_project_info.id, project.meta)
2481
+ for dataset in project.datasets:
2482
+ dataset: Dataset
2483
+ ds_info = self._api.dataset.create(
2484
+ gt_project_info.id, dataset.name, change_name_if_conflict=True
2485
+ )
2486
+ for batch_names in batched(dataset.get_items_names(), 100):
2487
+ img_infos = [dataset.get_item_info(name) for name in batch_names]
2488
+ img_ids = [img_info.id for img_info in img_infos]
2489
+ anns = [dataset.get_ann(name, project.meta) for name in batch_names]
2490
+
2491
+ img_infos = self._api.image.copy_batch(ds_info.id, img_ids)
2492
+ img_ids = [img_info.id for img_info in img_infos]
2493
+ self._api.annotation.upload_anns(img_ids, anns)
2494
+
2495
+ # 4. Match splits with original project
2496
+ gt_split_data = self._postprocess_splits(gt_project_info.id)
2497
+ return gt_project_info.id, gt_split_data
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: supervisely
3
- Version: 6.73.278
3
+ Version: 6.73.279
4
4
  Summary: Supervisely Python SDK.
5
5
  Home-page: https://github.com/supervisely/supervisely
6
6
  Author: Supervisely
@@ -968,13 +968,13 @@ supervisely/nn/tracker/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
968
968
  supervisely/nn/tracker/utils/gmc.py,sha256=3JX8979H3NA-YHNaRQyj9Z-xb9qtyMittPEjGw8y2Jo,11557
969
969
  supervisely/nn/tracker/utils/kalman_filter.py,sha256=eSFmCjM0mikHCAFvj-KCVzw-0Jxpoc3Cfc2NWEjJC1Q,17268
970
970
  supervisely/nn/training/__init__.py,sha256=gY4PCykJ-42MWKsqb9kl-skemKa8yB6t_fb5kzqR66U,111
971
- supervisely/nn/training/train_app.py,sha256=ropUF_M9RfijQ3XheqEtYl0Soix-69CgZeOnYiCIuI4,95088
971
+ supervisely/nn/training/train_app.py,sha256=mxoD8sgSuIc3B-LcieP9m1lEaUawuOuLeceRqDU6l6U,100168
972
972
  supervisely/nn/training/gui/__init__.py,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
973
973
  supervisely/nn/training/gui/classes_selector.py,sha256=8UgzA4aogOAr1s42smwEcDbgaBj_i0JLhjwlZ9bFdIA,3772
974
- supervisely/nn/training/gui/gui.py,sha256=nj4EVppoV9ZjLN0rVO0GKxmI56d6Qpp0qwnJJ6srT6w,23712
975
- supervisely/nn/training/gui/hyperparameters_selector.py,sha256=2qryuBss0bLcZJV8PNJ6_hKZM5Dbj2FIxTb3EULHQrE,6670
974
+ supervisely/nn/training/gui/gui.py,sha256=uXpNWU6q741PimYGZ5aFWpr0boewa0yDrpIz8JGcqXE,25004
975
+ supervisely/nn/training/gui/hyperparameters_selector.py,sha256=UAXZYyhuUOY7d2ZKAx4R5Kz-KQaiFZ7AnY8BDoj3_30,7071
976
976
  supervisely/nn/training/gui/input_selector.py,sha256=Jp9PnVVADv1fhndPuZdMlKuzWTOBQZogrOks5dwATlc,2179
977
- supervisely/nn/training/gui/model_selector.py,sha256=QTFHMf-8-rREYPk64QKoRvE4zKPC8V6tcP4H4N6nyt0,4082
977
+ supervisely/nn/training/gui/model_selector.py,sha256=n2Xn6as60bNPtSlImJtyrVEo0gjKnvHLT3yq_m39TXk,4334
978
978
  supervisely/nn/training/gui/train_val_splits_selector.py,sha256=MLryFD2Tj_RobkFzZOeQXzXpch0eGiVFisq3FGA3dFg,8549
979
979
  supervisely/nn/training/gui/training_artifacts.py,sha256=UpKI68S0h_nT_CEEKxBi1oeRsYVnocxRZZD4kUEnQ80,9584
980
980
  supervisely/nn/training/gui/training_logs.py,sha256=1CBqnL0l5kiZVaegJ-NLgOVI1T4EDB_rLAtumuw18Jo,3222
@@ -1070,9 +1070,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
1070
1070
  supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
1071
1071
  supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
1072
1072
  supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
1073
- supervisely-6.73.278.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
1074
- supervisely-6.73.278.dist-info/METADATA,sha256=hb6AM2qZI9n04Q_wTnLDxK9HzaYVWV1LK2Cp8hBcy7o,33573
1075
- supervisely-6.73.278.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
1076
- supervisely-6.73.278.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
1077
- supervisely-6.73.278.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
1078
- supervisely-6.73.278.dist-info/RECORD,,
1073
+ supervisely-6.73.279.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
1074
+ supervisely-6.73.279.dist-info/METADATA,sha256=LtI71cTqLVkLsxFnPgUIWaWidSe5J_2h2Pt7DzEVWXA,33573
1075
+ supervisely-6.73.279.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
1076
+ supervisely-6.73.279.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
1077
+ supervisely-6.73.279.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
1078
+ supervisely-6.73.279.dist-info/RECORD,,