supervisely 6.73.448__py3-none-any.whl → 6.73.449__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/nn/training/gui/train_val_splits_selector.py +52 -31
- {supervisely-6.73.448.dist-info → supervisely-6.73.449.dist-info}/METADATA +1 -1
- {supervisely-6.73.448.dist-info → supervisely-6.73.449.dist-info}/RECORD +7 -7
- {supervisely-6.73.448.dist-info → supervisely-6.73.449.dist-info}/LICENSE +0 -0
- {supervisely-6.73.448.dist-info → supervisely-6.73.449.dist-info}/WHEEL +0 -0
- {supervisely-6.73.448.dist-info → supervisely-6.73.449.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.448.dist-info → supervisely-6.73.449.dist-info}/top_level.txt +0 -0
@@ -180,7 +180,13 @@ class TrainValSplitsSelector:
             return False

         # Check if datasets are not empty
-        filters = [
+        filters = [
+            {
+                ApiField.FIELD: ApiField.ID,
+                ApiField.OPERATOR: "in",
+                ApiField.VALUE: train_dataset_id + val_dataset_id,
+            }
+        ]
         selected_datasets = self.api.dataset.get_list(self.project_id, filters, recursive=True)
         datasets_count = {}
         for dataset in selected_datasets:
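The new block expands the filter passed to dataset.get_list into an explicit ApiField dictionary, so only the datasets selected for the train and val splits are returned. Below is a minimal sketch of the same filter used outside the class; the project and dataset IDs are hypothetical, and ApiField is assumed to be importable from supervisely.api.module_api.

# Hedged sketch, not the library's own code; IDs are made up for illustration.
import supervisely as sly
from supervisely.api.module_api import ApiField

api = sly.Api.from_env()       # reads SERVER_ADDRESS / API_TOKEN from the environment
project_id = 12345             # hypothetical project ID
train_dataset_id = [101, 102]  # hypothetical dataset IDs selected for train
val_dataset_id = [103]         # hypothetical dataset IDs selected for val

# Single filter entry: keep only datasets whose ID is in the selected train/val lists
filters = [
    {
        ApiField.FIELD: ApiField.ID,
        ApiField.OPERATOR: "in",
        ApiField.VALUE: train_dataset_id + val_dataset_id,
    }
]
# recursive=True also walks nested datasets under the project, as in the changed code
selected_datasets = api.dataset.get_list(project_id, filters, recursive=True)
print([ds.name for ds in selected_datasets])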
@@ -334,6 +340,7 @@ class TrainValSplitsSelector:

     def _detect_splits(self, collections_split: bool, datasets_split: bool) -> bool:
         """Detect splits based on the selected method"""
+        self._parse_collections()
         splits_found = False
         if collections_split:
             splits_found = self._detect_collections()
@@ -341,47 +348,59 @@ class TrainValSplitsSelector:
             splits_found = self._detect_datasets()
         return splits_found

+    def _parse_collections(self) -> None:
+        """Parse collections with train and val prefixes and set them to train_val_splits variables"""
+        all_collections = self.api.entities_collection.get_list(self.project_id)
+        existing_train_collections = [
+            collection for collection in all_collections if collection.name.startswith("train_")
+        ]
+        existing_val_collections = [
+            collection for collection in all_collections if collection.name.startswith("val_")
+        ]
+
+        self._all_train_collections = existing_train_collections
+        self._all_val_collections = existing_val_collections
+        self._latest_train_collection = self._get_latest_collection(existing_train_collections, "train")
+        self._latest_val_collection = self._get_latest_collection(existing_val_collections, "val")
+
+    def _get_latest_collection(
+        self, collections: List[EntitiesCollectionInfo], expected_prefix: str
+    ) -> EntitiesCollectionInfo:
+        curr_collection = None
+        curr_idx = 0
+        for collection in collections:
+            parts = collection.name.split("_")
+            if len(parts) == 2:
+                prefix = parts[0].lower()
+                if prefix == expected_prefix:
+                    if parts[1].isdigit():
+                        collection_idx = int(parts[1])
+                        if collection_idx > curr_idx:
+                            curr_idx = collection_idx
+                            curr_collection = collection
+        return curr_collection
+
+
     def _detect_collections(self) -> bool:
         """Find collections with train and val prefixes and set them to train_val_splits"""
-        def _get_latest_collection(collections: List[EntitiesCollectionInfo]) -> EntitiesCollectionInfo:
-            curr_collection = None
-            curr_idx = 0
-            for collection in collections:
-                collection_idx = int(collection.name.rsplit('_', 1)[-1])
-                if collection_idx > curr_idx:
-                    curr_idx = collection_idx
-                    curr_collection = collection
-            return curr_collection

-        all_collections = self.api.entities_collection.get_list(self.project_id)
-        train_collections = []
-        val_collections = []
         collections_found = False
-
-
-
-
-                    val_collections.append(collection)
-
-        train_collection = _get_latest_collection(train_collections)
-        val_collection = _get_latest_collection(val_collections)
-        if train_collection is not None and val_collection is not None:
-            self.train_val_splits.set_collections_splits([train_collection.id], [val_collection.id])
+        if self._latest_train_collection is not None and self._latest_val_collection is not None:
+            self.train_val_splits.set_collections_splits(
+                [self._latest_train_collection.id], [self._latest_val_collection.id]
+            )
            self.validator_text = Text("Train and val collections are detected", status="info")
            self.validator_text.show()
            collections_found = True
-            self._all_train_collections = train_collections
-            self._all_val_collections = val_collections
-            self._latest_train_collection = train_collection
-            self._latest_val_collection = val_collection
         else:
             self.validator_text = Text("")
             self.validator_text.hide()
             collections_found = False
         return collections_found
-
+
     def _detect_datasets(self) -> bool:
         """Find datasets with train and val prefixes and set them to train_val_splits"""
+
         def _extend_with_nested(root_ds):
             nested = self.api.dataset.get_nested(self.project_id, root_ds.id)
             nested_ids = [ds.id for ds in nested]
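This hunk promotes the nested _get_latest_collection helper to a class method, adds a name-format check, and introduces _parse_collections, which _detect_splits now calls once to cache the train/val collections before detection runs. The old helper parsed the suffix with int(collection.name.rsplit('_', 1)[-1]) and would raise ValueError on a name like "train_extra"; the new version only considers names of the exact form "train_<N>" or "val_<N>". Below is a self-contained sketch of the selection logic under those assumptions, with a namedtuple standing in for EntitiesCollectionInfo.

# Hedged, standalone illustration of the new "latest collection" selection rule.
from collections import namedtuple
from typing import List, Optional

Collection = namedtuple("Collection", ["id", "name"])

def get_latest_collection(collections: List[Collection], expected_prefix: str) -> Optional[Collection]:
    """Return the collection named '<expected_prefix>_<N>' with the highest numeric N, if any."""
    curr_collection = None
    curr_idx = 0
    for collection in collections:
        parts = collection.name.split("_")
        # Only names of the exact form "<prefix>_<digits>" are considered
        if len(parts) == 2 and parts[0].lower() == expected_prefix and parts[1].isdigit():
            collection_idx = int(parts[1])
            if collection_idx > curr_idx:
                curr_idx = collection_idx
                curr_collection = collection
    return curr_collection

# "train_10" wins over "train_2"; "train_extra" is skipped because its suffix is not numeric
collections = [Collection(1, "train_2"), Collection(2, "train_10"), Collection(3, "train_extra")]
print(get_latest_collection(collections, "train"))  # Collection(id=2, name='train_10')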
@@ -407,7 +426,9 @@ class TrainValSplitsSelector:
         val_count = len(train_val_dataset_ids["val"])

         if train_count > 0 and val_count > 0:
-            self.train_val_splits.set_datasets_splits(
+            self.train_val_splits.set_datasets_splits(
+                train_val_dataset_ids["train"], train_val_dataset_ids["val"]
+            )
             datasets_found = True

         if train_count > 0 and val_count > 0:
@@ -415,7 +436,7 @@ class TrainValSplitsSelector:
             message = "train and val datasets are detected"
         else:
             message = "Multiple train and val datasets are detected. Check manually if selection is correct"
-
+
         self.validator_text = Text(message, status="info")
         self.validator_text.show()
         datasets_found = True
@@ -423,4 +444,4 @@ class TrainValSplitsSelector:
         self.validator_text = Text("")
         self.validator_text.hide()
         datasets_found = False
-        return datasets_found
+        return datasets_found
@@ -1020,7 +1020,7 @@ supervisely/nn/training/gui/hyperparameters_selector.py,sha256=tEyppV5ay7nECi6qB
 supervisely/nn/training/gui/input_selector.py,sha256=rmirJzpdxuYONI6y5_cvMdGWBJ--T20YTsISghATHu4,2510
 supervisely/nn/training/gui/model_selector.py,sha256=YKBAk6MheulFEl9TF9_mVtE3-Hsc0B3LmeOzMiV6AlQ,7487
 supervisely/nn/training/gui/tags_selector.py,sha256=0yg2OGPqiHUBp3iML2vrzTOVeSKtRtR9JoMy4Snx41U,3755
-supervisely/nn/training/gui/train_val_splits_selector.py,sha256=
+supervisely/nn/training/gui/train_val_splits_selector.py,sha256=NWyulmr3lBZU1tcT_2HXXVIag-vpQjSjzNvtqJ_f2kw,19409
 supervisely/nn/training/gui/training_artifacts.py,sha256=ZyTnB9PyhwsqGAANwnpyLriJtAb4p0f03Yhmm_jkfIE,10946
 supervisely/nn/training/gui/training_logs.py,sha256=GgEQMj9p98Z3p2b_-3BkHOhY7WQYELxctsRKmkbg3JY,4966
 supervisely/nn/training/gui/training_process.py,sha256=XJ3ELyys_rBFmLQnI9qe3QhmfZ6U0CrK1FbI6d-Fbns,3664
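For reference, each RECORD entry pairs a file path with its SHA-256 digest, encoded as unpadded URL-safe base64, followed by the file size in bytes, so the changed hash and the 19409-byte size above simply reflect the edited source file. The sketch below shows how such an entry can be recomputed for a local file; the path is a hypothetical checkout location, not part of this diff.

# Hedged sketch: rebuild a wheel RECORD line "path,sha256=<digest>,<size>" for a local file.
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    data = Path(path).read_bytes()
    # RECORD uses an unpadded URL-safe base64 encoding of the raw SHA-256 digest
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# Example usage against a hypothetical local checkout:
# print(record_entry("supervisely/nn/training/gui/train_val_splits_selector.py"))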
@@ -1127,9 +1127,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=yRwzEQmVwSd6lUQoAUdBngKEOlnoQ6hA9ZcoZGJRNC4,331
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
-supervisely-6.73.
+supervisely-6.73.449.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.449.dist-info/METADATA,sha256=d65TCju-7Q_u75UWtDWAgTcIRszBoX5r8KPONLcp0Bo,35480
+supervisely-6.73.449.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+supervisely-6.73.449.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.449.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.449.dist-info/RECORD,,
The remaining dist-info files (LICENSE, WHEEL, entry_points.txt, top_level.txt) are without changes.