supervisely 6.73.301__py3-none-any.whl → 6.73.303__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of supervisely might be problematic.

@@ -5,6 +5,7 @@ import json
 import os
 import time
 from collections import OrderedDict, defaultdict
+from pathlib import Path
 
 # docs
 from typing import Any, Callable, Dict, List, Literal, NamedTuple, Optional, Union
@@ -12,6 +13,7 @@ from typing import Any, Callable, Dict, List, Literal, NamedTuple, Optional, Uni
 from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
 from tqdm import tqdm
 
+from supervisely import logger
 from supervisely._utils import batched, take_with_default
 from supervisely.api.module_api import (
     ApiField,
@@ -20,7 +22,12 @@ from supervisely.api.module_api import (
     WaitingTimeExceeded,
 )
 from supervisely.collection.str_enum import StrEnum
-from supervisely.io.fs import ensure_base_path, get_file_hash, get_file_name
+from supervisely.io.fs import (
+    ensure_base_path,
+    get_file_hash,
+    get_file_name,
+    get_file_name_with_ext,
+)
 
 
 class TaskFinishedWithError(Exception):
@@ -301,7 +308,10 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
         )
 
     def upload_dtl_archive(
-        self, task_id: int, archive_path: str, progress_cb: Optional[Union[tqdm, Callable]] = None
+        self,
+        task_id: int,
+        archive_path: str,
+        progress_cb: Optional[Union[tqdm, Callable]] = None,
     ):
         """upload_dtl_archive"""
         encoder = MultipartEncoder(
@@ -822,11 +832,19 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
         return resp.json()
 
     def set_output_report(
-        self, task_id: int, file_id: int, file_name: str, description: Optional[str] = "Report"
+        self,
+        task_id: int,
+        file_id: int,
+        file_name: str,
+        description: Optional[str] = "Report",
     ) -> Dict:
         """set_output_report"""
         return self._set_custom_output(
-            task_id, file_id, file_name, description=description, icon="zmdi zmdi-receipt"
+            task_id,
+            file_id,
+            file_name,
+            description=description,
+            icon="zmdi zmdi-receipt",
         )
 
     def _set_custom_output(
@@ -942,7 +960,11 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
         )
 
     def update_meta(
-        self, id: int, data: dict, agent_storage_folder: str = None, relative_app_dir: str = None
+        self,
+        id: int,
+        data: dict,
+        agent_storage_folder: str = None,
+        relative_app_dir: str = None,
     ):
         """
         Update given task metadata
@@ -1197,3 +1219,265 @@ class TaskApi(ModuleApiBase, ModuleWithStatus):
             "tasks.output.set", {ApiField.TASK_ID: task_id, ApiField.OUTPUT: output}
         )
         return resp.json()
+
+    def deploy_model_from_api(self, task_id, deploy_params):
+        self.send_request(
+            task_id,
+            "deploy_from_api",
+            data={"deploy_params": deploy_params},
+            raise_error=True,
+        )
+
+    def deploy_model_app(
+        self,
+        module_id: int,
+        workspace_id: int,
+        agent_id: Optional[int] = None,
+        description: Optional[str] = "application description",
+        params: Dict[str, Any] = None,
+        log_level: Optional[Literal["info", "debug", "warning", "error"]] = "info",
+        users_ids: Optional[List[int]] = None,
+        app_version: Optional[str] = "",
+        is_branch: Optional[bool] = False,
+        task_name: Optional[str] = "pythonSpawned",
+        restart_policy: Optional[Literal["never", "on_error"]] = "never",
+        proxy_keep_url: Optional[bool] = False,
+        redirect_requests: Optional[Dict[str, int]] = {},
+        limit_by_workspace: bool = False,
+        deploy_params: Dict[str, Any] = None,
+        timeout: int = 100,
+    ):
+        if deploy_params is None:
+            deploy_params = {}
+        task_info = self.start(
+            agent_id=agent_id,
+            workspace_id=workspace_id,
+            module_id=module_id,
+            description=description,
+            params=params,
+            log_level=log_level,
+            users_ids=users_ids,
+            app_version=app_version,
+            is_branch=is_branch,
+            task_name=task_name,
+            restart_policy=restart_policy,
+            proxy_keep_url=proxy_keep_url,
+            redirect_requests=redirect_requests,
+            limit_by_workspace=limit_by_workspace,
+        )
+
+        attempt_delay_sec = 10
+        attempts = (timeout + attempt_delay_sec) // attempt_delay_sec
+        ready = self._api.app.wait_until_ready_for_api_calls(
+            task_info["id"], attempts, attempt_delay_sec
+        )
+        if not ready:
+            raise TimeoutError(
+                f"Task {task_info['id']} is not ready for API calls after {timeout} seconds."
+            )
+        self.deploy_model_from_api(task_info["id"], deploy_params=deploy_params)
+        return task_info
+
+    def deploy_custom_model(
+        self,
+        team_id: int,
+        workspace_id: int,
+        artifacts_dir: str,
+        checkpoint_name: str = None,
+        agent_id: int = None,
+        device: str = "cuda",
+    ):
+        """
+        Deploy a custom model based on the artifacts directory.
+
+        :param workspace_id: Workspace ID in Supervisely.
+        :type workspace_id: int
+        :param artifacts_dir: Path to the artifacts directory.
+        :type artifacts_dir: str
+        :param checkpoint_name: Checkpoint name (with extension) to deploy.
+        :type checkpoint_name: Optional[str]
+        :param agent_id: Agent ID in Supervisely.
+        :type agent_id: Optional[int]
+        :param device: Device string (default is "cuda").
+        :type device: str
+        :raises ValueError: if validations fail.
+        """
+        from dataclasses import asdict
+
+        from supervisely.nn.artifacts import (
+            RITM,
+            RTDETR,
+            Detectron2,
+            MMClassification,
+            MMDetection,
+            MMDetection3,
+            MMSegmentation,
+            UNet,
+            YOLOv5,
+            YOLOv5v2,
+            YOLOv8,
+        )
+        from supervisely.nn.experiments import get_experiment_info_by_artifacts_dir
+        from supervisely.nn.utils import ModelSource, RuntimeType
+
+        if not isinstance(workspace_id, int) or workspace_id <= 0:
+            raise ValueError(f"workspace_id must be a positive integer. Received: {workspace_id}")
+        if not isinstance(artifacts_dir, str) or not artifacts_dir.strip():
+            raise ValueError("artifacts_dir must be a non-empty string.")
+
+        workspace_info = self._api.workspace.get_info_by_id(workspace_id)
+        if workspace_info is None:
+            raise ValueError(f"Workspace with ID '{workspace_id}' not found.")
+
+        team_id = workspace_info.team_id
+        logger.debug(
+            f"Starting model deployment. Team: {team_id}, Workspace: {workspace_id}, Artifacts Dir: '{artifacts_dir}'"
+        )
+
+        # Train V1 logic (if artifacts_dir does not start with '/experiments')
+        if not artifacts_dir.startswith("/experiments"):
+            logger.debug("Deploying model from Train V1 artifacts")
+            frameworks = {
+                "/detectron2": Detectron2,
+                "/mmclassification": MMClassification,
+                "/mmdetection": MMDetection,
+                "/mmdetection-3": MMDetection3,
+                "/mmsegmentation": MMSegmentation,
+                "/RITM_training": RITM,
+                "/RT-DETR": RTDETR,
+                "/unet": UNet,
+                "/yolov5_train": YOLOv5,
+                "/yolov5_2.0_train": YOLOv5v2,
+                "/yolov8_train": YOLOv8,
+            }
+
+            framework_cls = next(
+                (cls for prefix, cls in frameworks.items() if artifacts_dir.startswith(prefix)),
+                None,
+            )
+            if not framework_cls:
+                raise ValueError(f"Unsupported framework for artifacts_dir: '{artifacts_dir}'")
+
+            framework = framework_cls(team_id)
+            if framework_cls is RITM or framework_cls is YOLOv5:
+                raise ValueError(
+                    f"{framework.framework_name} framework is not supported for deployment"
+                )
+
+            logger.debug(f"Detected framework: '{framework.framework_name}'")
+
+            module_id = self._api.app.get_ecosystem_module_id(framework.serve_slug)
+            serve_app_name = framework.serve_app_name
+            logger.debug(f"Module ID fetched: '{module_id}'. App name: '{serve_app_name}'")
+
+            train_info = framework.get_info_by_artifacts_dir(artifacts_dir.rstrip("/"))
+            if not hasattr(train_info, "checkpoints") or not train_info.checkpoints:
+                raise ValueError("No checkpoints found in train info.")
+
+            checkpoint = None
+            if checkpoint_name is not None:
+                for cp in train_info.checkpoints:
+                    if cp.name == checkpoint_name:
+                        checkpoint = cp
+                        break
+                if checkpoint is None:
+                    raise ValueError(f"Checkpoint '{checkpoint_name}' not found in train info.")
+            else:
+                logger.debug("Checkpoint name not provided. Using the last checkpoint.")
+                checkpoint = train_info.checkpoints[-1]
+
+            checkpoint_name = checkpoint.name
+            deploy_params = {
+                "device": device,
+                "model_source": ModelSource.CUSTOM,
+                "task_type": train_info.task_type,
+                "checkpoint_name": checkpoint_name,
+                "checkpoint_url": checkpoint.path,
+            }
+
+            if getattr(train_info, "config_path", None) is not None:
+                deploy_params["config_url"] = train_info.config_path
+
+            if framework.require_runtime:
+                deploy_params["runtime"] = RuntimeType.PYTORCH
+
+        else:  # Train V2 logic (when artifacts_dir starts with '/experiments')
+            logger.debug("Deploying model from Train V2 artifacts")
+
+            def get_framework_from_artifacts_dir(artifacts_dir: str) -> str:
+                clean_path = artifacts_dir.rstrip("/")
+                parts = clean_path.split("/")
+                if not parts or "_" not in parts[-1]:
+                    raise ValueError(f"Invalid artifacts_dir format: '{artifacts_dir}'")
+                return parts[-1].split("_", 1)[1]
+
+            # TODO: temporary solution, need to add Serve App Name into config.json
+            framework_name = get_framework_from_artifacts_dir(artifacts_dir)
+            logger.debug(f"Detected framework: {framework_name}")
+
+            modules = self._api.app.get_list_all_pages(
+                method="ecosystem.list",
+                data={"filter": [], "search": framework_name, "categories": ["serve"]},
+                convert_json_info_cb=lambda x: x,
+            )
+            if not modules:
+                raise ValueError(f"No serve apps found for framework: '{framework_name}'")
+
+            module = modules[0]
+            module_id = module["id"]
+            serve_app_name = module["name"]
+            logger.debug(f"Serving app selected: '{serve_app_name}'. Module ID: '{module_id}'")
+
+            experiment_info = get_experiment_info_by_artifacts_dir(
+                self._api, team_id, artifacts_dir
+            )
+            if not experiment_info:
+                raise ValueError(
+                    f"Failed to retrieve experiment info for artifacts_dir: '{artifacts_dir}'"
+                )
+
+            if len(experiment_info.checkpoints) == 0:
+                raise ValueError(f"No checkpoints found in: '{artifacts_dir}'.")
+
+            checkpoint = None
+            if checkpoint_name is not None:
+                for checkpoint_path in experiment_info.checkpoints:
+                    if get_file_name_with_ext(checkpoint_path) == checkpoint_name:
+                        checkpoint = get_file_name_with_ext(checkpoint_path)
+                        break
+                if checkpoint is None:
+                    raise ValueError(f"Provided checkpoint '{checkpoint_name}' not found.")
+            else:
+                logger.debug("Checkpoint name not provided. Using the best checkpoint.")
+                checkpoint = experiment_info.best_checkpoint
+
+            checkpoint_name = checkpoint
+            deploy_params = {
+                "device": device,
+                "model_source": ModelSource.CUSTOM,
+                "model_files": {
+                    "checkpoint": f"{experiment_info.artifacts_dir}checkpoints/{checkpoint_name}"
+                },
+                "model_info": asdict(experiment_info),
+                "runtime": RuntimeType.PYTORCH,
+            }
+            # TODO: add support for **kwargs
+
+            config = experiment_info.model_files.get("config")
+            if config is not None:
+                deploy_params["model_files"]["config"] = f"{experiment_info.artifacts_dir}{config}"
+                logger.debug(f"Config file added: {experiment_info.artifacts_dir}{config}")
+
+        logger.info(
+            f"{serve_app_name} app deployment started. Checkpoint: '{checkpoint_name}'. Deploy params: '{deploy_params}'"
+        )
+        self.deploy_model_app(
+            module_id,
+            workspace_id,
+            agent_id,
+            description="Deployed via deploy_custom_model",
+            task_name=f"{serve_app_name} ({checkpoint_name})",
+            deploy_params=deploy_params,
+        )
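
The new deployment helpers chain together: deploy_custom_model resolves the framework and checkpoint from the artifacts directory, builds deploy_params, spawns the serve app via deploy_model_app, and pushes the parameters to the running task with deploy_model_from_api. A minimal usage sketch; the IDs, directory, and checkpoint name below are placeholders, not values from this release:

import supervisely as sly

api = sly.Api.from_env()

# Train V1 artifacts live under framework-specific roots such as "/yolov8_train";
# Train V2 artifacts live under "/experiments".
api.task.deploy_custom_model(
    team_id=1,                  # recomputed internally from the workspace
    workspace_id=2,
    artifacts_dir="/yolov8_train/my-project/12345",
    checkpoint_name="best.pt",  # optional; defaults to the last/best checkpoint
    device="cuda",
)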
@@ -3,14 +3,30 @@
 # Project
 from supervisely.convert.image.coco.coco_helper import sly_project_to_coco as project_to_coco
 from supervisely.convert.image.yolo.yolo_helper import sly_project_to_yolo as project_to_yolo
-from supervisely.convert.image.pascal_voc.pascal_voc_helper import sly_project_to_pascal_voc as project_to_pascal_voc
+from supervisely.convert.image.pascal_voc.pascal_voc_helper import (
+    sly_project_to_pascal_voc as project_to_pascal_voc,
+)
 
 # Dataset
 from supervisely.convert.image.coco.coco_helper import sly_ds_to_coco as dataset_to_coco
 from supervisely.convert.image.yolo.yolo_helper import sly_ds_to_yolo as dataset_to_yolo
-from supervisely.convert.image.pascal_voc.pascal_voc_helper import sly_ds_to_pascal_voc as dataset_to_pascal_voc
+from supervisely.convert.image.pascal_voc.pascal_voc_helper import (
+    sly_ds_to_pascal_voc as dataset_to_pascal_voc,
+)
 
 # Image Annotations
 from supervisely.convert.image.coco.coco_helper import sly_ann_to_coco as annotation_to_coco
 from supervisely.convert.image.yolo.yolo_helper import sly_ann_to_yolo as annotation_to_yolo
-from supervisely.convert.image.pascal_voc.pascal_voc_helper import sly_ann_to_pascal_voc as annotation_to_pascal_voc
+from supervisely.convert.image.pascal_voc.pascal_voc_helper import (
+    sly_ann_to_pascal_voc as annotation_to_pascal_voc,
+)
+
+
+# Supervisely Project/Dataset/Annotation to COCO
+from supervisely.convert.image.coco.coco_helper import to_coco
+
+# Supervisely Project/Dataset/Annotation to YOLO
+from supervisely.convert.image.yolo.yolo_helper import to_yolo
+
+# Supervisely Project/Dataset/Annotation to Pascal VOC
+from supervisely.convert.image.pascal_voc.pascal_voc_helper import to_pascal_voc
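
This hunk re-exports three new universal converters from what appears to be the converter package's public namespace, giving one entry point per target format. A short sketch, assuming to_yolo and to_pascal_voc mirror the to_coco signature shown later in this diff:

import supervisely as sly

# Each universal converter accepts a Project, a Dataset, or a path to one.
sly.convert.to_coco("./source/project", dest_dir="./coco")
sly.convert.to_yolo("./source/project", dest_dir="./yolo")
sly.convert.to_pascal_voc("./source/project", dest_dir="./pascal_voc")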
@@ -4,6 +4,7 @@ import sys
 import uuid
 from copy import deepcopy
 from datetime import datetime
+from itertools import groupby
 from pathlib import Path
 from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
 
@@ -351,7 +352,8 @@ def _get_graph_info(idx, obj_class):
     return data
 
 
-def get_categories_from_meta(meta: ProjectMeta):
+def get_categories_from_meta(meta: ProjectMeta) -> List[Dict[str, Any]]:
+    """Get categories from Supervisely project meta."""
     cat = lambda idx, c: {"supercategory": c.name, "id": idx, "name": c.name}
     return [
         cat(idx, c) if c.geometry_type != GraphNodes else _get_graph_info(idx, c)
@@ -359,6 +361,34 @@ def get_categories_from_meta(meta: ProjectMeta):
     ]
 
 
+def extend_mask_up_to_image(
+    binary_mask: np.ndarray, image_shape: Tuple[int, int], origin: PointLocation
+) -> np.ndarray:
+    """Extend binary mask up to image shape."""
+    y, x = origin.col, origin.row
+    new_mask = np.zeros(image_shape, dtype=binary_mask.dtype)
+    try:
+        new_mask[x : x + binary_mask.shape[0], y : y + binary_mask.shape[1]] = binary_mask
+    except ValueError as e:
+        raise ValueError(
+            f"Binary mask size {binary_mask.shape} with origin {origin} "
+            f"exceeds image boundaries {image_shape}"
+        ) from e
+    return new_mask
+
+
+def coco_segmentation_rle(segmentation: np.ndarray) -> Dict[str, Any]:
+    """Convert a binary segmentation mask to uncompressed COCO RLE format."""
+    binary_mask = np.asfortranarray(segmentation)
+    rle = {"counts": [], "size": list(binary_mask.shape)}
+    counts = rle.get("counts")
+    for i, (value, elements) in enumerate(groupby(binary_mask.ravel(order="F"))):
+        if i == 0 and value == 1:
+            counts.append(0)
+        counts.append(len(list(elements)))
+    return rle
+
+
 def sly_ann_to_coco(
     ann: Annotation,
     coco_image_id: int,
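
coco_segmentation_rle produces uncompressed COCO RLE: counts holds alternating run lengths over the column-major (Fortran-order) flattened mask, starting with a background run, hence the leading 0 appended when the first pixel is foreground. A quick round-trip check; decode_rle is a hypothetical inverse written here only for illustration, not part of the SDK:

import numpy as np

from supervisely.convert.image.coco.coco_helper import coco_segmentation_rle

def decode_rle(rle):
    # Rebuild the mask from alternating background/foreground run lengths,
    # then restore the original shape in column-major order.
    flat = np.zeros(sum(rle["counts"]), dtype=np.uint8)
    pos, value = 0, 0
    for run in rle["counts"]:
        flat[pos : pos + run] = value
        pos += run
        value = 1 - value
    return flat.reshape(rle["size"], order="F")

mask = np.array([[0, 1], [0, 1]], dtype=np.uint8)
rle = coco_segmentation_rle(mask)  # {"counts": [2, 2], "size": [2, 2]}
assert np.array_equal(decode_rle(rle), mask)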
@@ -491,17 +521,14 @@ def sly_ann_to_coco(
 
     res_inst = []  # result list of COCO objects
 
+    h, w = ann.img_size
     for binding_key, labels in ann.get_bindings().items():
         if binding_key is None:
             polygons = [l for l in labels if l.obj_class.geometry_type == Polygon]
             masks = [l for l in labels if l.obj_class.geometry_type == Bitmap]
             bboxes = [l for l in labels if l.obj_class.geometry_type == Rectangle]
             graphs = [l for l in labels if l.obj_class.geometry_type == GraphNodes]
-            if len(masks) > 0:
-                for l in masks:
-                    polygon_cls = l.obj_class.clone(geometry_type=Polygon)
-                    polygons.extend(l.convert(polygon_cls))
-            for label in polygons + bboxes:
+            for label in polygons + bboxes + masks:
                 cat_id = class_mapping[label.obj_class.name]
                 coco_obj = coco_obj_template(label_id, coco_image_id, cat_id)
                 coco_obj["bbox"] = _get_common_bbox([label])
@@ -510,6 +537,11 @@ def sly_ann_to_coco(
                     poly = label.geometry.to_json()["points"]["exterior"]
                     poly = np.array(poly).flatten().astype(float).tolist()
                     coco_obj["segmentation"] = [poly]
+                elif label.obj_class.geometry_type == Bitmap:
+                    segmentation = extend_mask_up_to_image(
+                        label.geometry.data, (h, w), label.geometry.origin
+                    )
+                    coco_obj["segmentation"] = coco_segmentation_rle(segmentation)
 
                 label_id = _update_inst_results(label_id, coco_ann, coco_obj, res_inst)
 
@@ -525,38 +557,60 @@ def sly_ann_to_coco(
             masks = [l for l in labels if l.obj_class.geometry_type == Bitmap]
             graphs = [l for l in labels if l.obj_class.geometry_type == GraphNodes]
 
-            if len(masks) > 0:  # convert Bitmap to Polygon
-                for l in masks:
-                    polygon_cls = l.obj_class.clone(geometry_type=Polygon)
-                    polygons.extend(l.convert(polygon_cls))
+            need_to_process_separately = len(masks) > 0 and len(polygons) > 0
+            bbox_matched_w_mask = False
+            bbox_matched_w_poly = False
 
-            matched_bbox = False
-            if len(polygons) > 0:  # process polygons
+            if len(graphs) > 0:
+                if len(masks) > 0 or len(polygons) > 0:
+                    logger.warning(
+                        "Keypoints and Polygons/Bitmaps in one binding key are not supported. "
+                        "Objects will be converted separately."
+                    )
+                if len(graphs) > 1:
+                    logger.warning(
+                        "Multiple Keypoints in one binding key are not supported. "
+                        "Only the first graph will be converted."
+                    )
+                cat_id = class_mapping[graphs[0].obj_class.name]
+                coco_obj = _create_keypoints_obj(graphs[0], cat_id, label_id, coco_image_id)
+                label_id = _update_inst_results(label_id, coco_ann, coco_obj, res_inst)
+
+            # convert Bitmap masks to COCO RLE
+            if len(masks) > 0:
+                for label in masks:
+                    cat_id = class_mapping[label.obj_class.name]
+                    coco_obj = coco_obj_template(label_id, coco_image_id, cat_id)
+                    segmentation = extend_mask_up_to_image(
+                        label.geometry.data, (h, w), label.geometry.origin
+                    )
+                    coco_obj["segmentation"] = coco_segmentation_rle(segmentation)
+                    coco_obj["area"] = label.geometry.area
+                    if len(bboxes) > 0 and not need_to_process_separately:
+                        found = _get_common_bbox(bboxes, sly_bbox=True, approx=True)
+                        new = _get_common_bbox([label], sly_bbox=True)
+                        bbox_matched_w_mask = found.contains(new)
+                    coco_obj["bbox"] = _get_common_bbox(bboxes if bbox_matched_w_mask else [label])
+                    label_id = _update_inst_results(label_id, coco_ann, coco_obj, res_inst)
+
+            # process polygons
+            if len(polygons) > 0:
                 cat_id = class_mapping[polygons[0].obj_class.name]
                 coco_obj = coco_obj_template(label_id, coco_image_id, cat_id)
-                if len(bboxes) > 0:
+                if len(bboxes) > 0 and not need_to_process_separately:
                     found = _get_common_bbox(bboxes, sly_bbox=True, approx=True)
                     new = _get_common_bbox(polygons, sly_bbox=True)
-                    matched_bbox = found.contains(new)
+                    bbox_matched_w_poly = found.contains(new)
 
                 polys = [l.geometry.to_json()["points"]["exterior"] for l in polygons]
                 polys = [np.array(p).flatten().astype(float).tolist() for p in polys]
                 coco_obj["segmentation"] = polys
                 coco_obj["area"] = sum([l.geometry.area for l in polygons])
-                coco_obj["bbox"] = _get_common_bbox(bboxes if matched_bbox else polygons)
-                label_id = _update_inst_results(label_id, coco_ann, coco_obj, res_inst)
-
-            if len(graphs) > 0:
-                if len(graphs) > 1:
-                    logger.warning(
-                        "Multiple Keypoints in one binding key are not supported. "
-                        "Only the first graph will be converted."
-                    )
-                cat_id = class_mapping[graphs[0].obj_class.name]
-                coco_obj = _create_keypoints_obj(graphs[0], cat_id, label_id, coco_image_id)
+                coco_obj["bbox"] = _get_common_bbox(bboxes if bbox_matched_w_poly else polygons)
                 label_id = _update_inst_results(label_id, coco_ann, coco_obj, res_inst)
 
-            if len(bboxes) > 0 and not matched_bbox:  # process bboxes separately
+            # process bboxes separately if they are not matched with masks/polygons
+            if len(bboxes) > 0 and not bbox_matched_w_poly and not bbox_matched_w_mask:
                 for label in bboxes:
                     cat_id = class_mapping[label.obj_class.name]
                     coco_obj = coco_obj_template(label_id, coco_image_id, cat_id)
@@ -713,7 +767,9 @@ def sly_ds_to_coco(
 
         coco_ann["images"].append(image_coco(image_info, image_idx))
         if with_captions is True:
-            coco_captions["images"].append(image_coco(image_info, image_idx))  # pylint: disable=unsubscriptable-object
+            # pylint: disable=unsubscriptable-object
+            coco_captions["images"].append(image_coco(image_info, image_idx))
+            # pylint: enable=unsubscriptable-object
 
         ann = Annotation.load_json_file(ann_path, meta)
         if ann.img_size is None or ann.img_size == (0, 0) or ann.img_size == (None, None):
@@ -815,3 +871,97 @@ def sly_project_to_coco(
         )
         logger.info(f"Dataset '{ds.short_name}' has been converted to COCO format.")
     logger.info(f"Project '{project.name}' has been converted to COCO format.")
+
+
+def to_coco(
+    input_data: Union[Project, Dataset, str],
+    dest_dir: Optional[str] = None,
+    meta: Optional[ProjectMeta] = None,
+    copy_images: bool = True,
+    with_captions: bool = False,
+    log_progress: bool = True,
+    progress_cb: Optional[Callable] = None,
+) -> Union[None, str]:
+    """
+    Universal function to convert a Supervisely project or dataset to COCO format.
+
+    Note:
+        - For better compatibility, pass named arguments explicitly; otherwise, the function
+          may not work as expected. You can also use the dedicated functions for each data type:
+
+          - :func:`sly.convert.sly_project_to_coco()`
+          - :func:`sly.convert.sly_ds_to_coco()`
+
+        - If input_data is a Project, the dest_dir parameter is required.
+        - If input_data is a Dataset, the meta and dest_dir parameters are required.
+
+    :param input_data: Supervisely project, dataset, or path to the project or dataset.
+    :type input_data: :class:`Project<supervisely.project.project.Project>`, :class:`Dataset<supervisely.project.dataset.Dataset>` or :class:`str`
+    :param dest_dir: Destination directory to save the project or dataset in COCO format.
+    :type dest_dir: :class:`str`, optional
+    :param meta: Project meta information (required for dataset conversion).
+    :type meta: :class:`ProjectMeta<supervisely.project.project_meta.ProjectMeta>`, optional
+    :param copy_images: Copy images to the destination directory.
+    :type copy_images: :class:`bool`, optional
+    :param with_captions: If True, also writes COCO captions.
+    :type with_captions: :class:`bool`, optional
+    :param log_progress: Show conversion progress bar.
+    :type log_progress: :class:`bool`
+    :param progress_cb: Function for tracking conversion progress (for all items in the project or dataset).
+    :type progress_cb: callable, optional
+
+    :return: None for Project input, or the path to the converted dataset for Dataset input.
+    :rtype: NoneType or str
+
+    :Usage example:
+
+     .. code-block:: python
+
+        import supervisely as sly
+
+        # Local folder with Project in Supervisely format
+        project_directory = "./source/project"
+        project_fs = sly.Project(project_directory, sly.OpenMode.READ)
+
+        # Convert Project to COCO format
+        sly.convert.to_coco(project_directory, dest_dir="./coco")
+        # or
+        sly.convert.to_coco(project_fs, dest_dir="./coco")
+
+        # Convert Dataset to COCO format
+        # dataset: sly.Dataset
+        sly.convert.to_coco(dataset, dest_dir="./coco", meta=project_fs.meta)
+    """
+    if isinstance(input_data, str):
+        try:
+            input_data = Project(input_data, mode=OpenMode.READ)
+        except Exception:
+            try:
+                input_data = Dataset(input_data, mode=OpenMode.READ)
+            except Exception:
+                raise ValueError("Please check the path or the input data.")
+
+    if isinstance(input_data, Project):
+        return sly_project_to_coco(
+            project=input_data,
+            dest_dir=dest_dir,
+            copy_images=copy_images,
+            with_captions=with_captions,
+            log_progress=log_progress,
+            progress_cb=progress_cb,
+        )
+    if isinstance(input_data, Dataset):
+        if meta is None:
+            raise ValueError("Project meta information is required for dataset conversion.")
+        return sly_ds_to_coco(
+            dataset=input_data,
+            meta=meta,
+            return_type="path",
+            dest_dir=dest_dir,
+            copy_images=copy_images,
+            with_captions=with_captions,
+            log_progress=log_progress,
+            progress_cb=progress_cb,
+        )
+    raise ValueError("Unsupported input type. Only Project or Dataset are supported.")
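
Since to_coco dispatches on the input type and forwards dataset conversion to sly_ds_to_coco with return_type="path", it can also drive per-dataset workflows. A sketch under those assumptions; the directory layout is illustrative:

import supervisely as sly

project = sly.Project("./source/project", sly.OpenMode.READ)
for dataset in project.datasets:
    # Dataset conversion requires the project meta and returns the output path.
    out = sly.convert.to_coco(dataset, dest_dir=f"./coco/{dataset.name}", meta=project.meta)
    print(out)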