supervisely 6.73.276__py3-none-any.whl → 6.73.278__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of supervisely might be problematic. Click here for more details.

@@ -1,8 +1,27 @@
1
- from typing import List, Tuple, Union
2
-
3
- from supervisely import AnyGeometry, GraphNodes, Polygon, Rectangle, logger
4
- from supervisely.geometry.graph import KeypointsTemplate, Node
1
+ import os
2
+ import shutil
3
+ from pathlib import Path
4
+ from typing import Callable, List, Literal, Optional, Tuple, Union
5
+
6
+ import yaml
7
+ from tqdm import tqdm
8
+
9
+ from supervisely._utils import generate_free_name
10
+ from supervisely.annotation.annotation import Annotation
11
+ from supervisely.annotation.label import Label
12
+ from supervisely.geometry.alpha_mask import AlphaMask
13
+ from supervisely.geometry.any_geometry import AnyGeometry
14
+ from supervisely.geometry.bitmap import Bitmap
15
+ from supervisely.geometry.graph import GraphNodes, KeypointsTemplate, Node
16
+ from supervisely.geometry.polygon import Polygon
17
+ from supervisely.geometry.polyline import Polyline
18
+ from supervisely.geometry.rectangle import Rectangle
5
19
  from supervisely.imaging.color import generate_rgb
20
+ from supervisely.io.fs import get_file_name_with_ext, touch
21
+ from supervisely.project.project import Dataset, OpenMode, Project
22
+ from supervisely.project.project_meta import ProjectMeta
23
+ from supervisely.sly_logger import logger
24
+ from supervisely.task.progress import tqdm_sly
6
25
 
7
26
  YOLO_DETECTION_COORDS_NUM = 4
8
27
  YOLO_SEGM_MIN_COORDS_NUM = 6
@@ -322,3 +341,319 @@ def get_geometry(
322
341
  num_keypoints=num_keypoints,
323
342
  num_dims=num_dims,
324
343
  )
344
+
345
+
346
def rectangle_to_yolo_line(
    class_idx: int,
    geometry: Rectangle,
    img_height: int,
    img_width: int,
):
    """Serialize a Rectangle as a YOLO detection line: ``"cls cx cy w h"``.

    All coordinates are normalized to [0, 1] by the image size and printed
    with six decimal places.
    """
    center_x = geometry.center.col / img_width
    center_y = geometry.center.row / img_height
    box_w = geometry.width / img_width
    box_h = geometry.height / img_height
    return f"{class_idx} {center_x:.6f} {center_y:.6f} {box_w:.6f} {box_h:.6f}"
357
+
358
+
359
def polygon_to_yolo_line(
    class_idx: int,
    geometry: Polygon,
    img_height: int,
    img_width: int,
) -> str:
    """Serialize a Polygon exterior as a YOLO segmentation line.

    Produces ``"cls x1 y1 x2 y2 ..."`` with coordinates normalized to
    [0, 1] by the image size, six decimal places each.
    """
    normalized = (
        f"{value:.6f}"
        for point in geometry.exterior
        for value in (point.col / img_width, point.row / img_height)
    )
    return f"{class_idx} {' '.join(normalized)}"
371
+
372
+
373
def keypoints_to_yolo_line(
    class_idx: int,
    geometry: GraphNodes,
    img_height: int,
    img_width: int,
    max_kpts_count: int,
):
    """Serialize a GraphNodes as a YOLO pose line.

    The line is the enclosing bbox (``cls cx cy w h``, normalized) followed
    by one ``x y visibility`` triple per keypoint, zero-padded with
    ``"0 0 0"`` up to *max_kpts_count* triples.
    """
    bbox = geometry.to_bbox()
    cx = bbox.center.col / img_width
    cy = bbox.center.row / img_height
    bw = bbox.width / img_width
    bh = bbox.height / img_height

    parts = [f"{class_idx} {cx:.6f} {cy:.6f} {bw:.6f} {bh:.6f}"]
    for node in geometry.nodes.values():
        node: Node
        # YOLO visibility flag: 2 = labeled & visible, 1 = labeled but occluded.
        visibility = 1 if node.disabled else 2
        parts.append(
            f"{node.location.col / img_width:.6f} {node.location.row / img_height:.6f} {visibility}"
        )

    # Pad so every pose line carries exactly max_kpts_count keypoint triples.
    missing = max_kpts_count - len(geometry.nodes)
    if missing > 0:
        parts.extend(["0 0 0"] * missing)

    return " ".join(parts)
397
+
398
+
399
def convert_label_geometry_if_needed(
    label: Label,
    task_type: Literal["detection", "segmentation", "pose"],
    verbose: bool = False,
) -> List[Label]:
    """Convert a label's geometry to the type required by the YOLO task.

    :param label: label whose geometry may need conversion.
    :param task_type: target YOLO task; determines the required geometry type.
    :param verbose: when True, log a warning for labels that cannot be converted.
    :return: ``[label]`` if it already has the required geometry, the list of
        converted labels if convertible, or ``[]`` if the geometry is unsupported.
    :raises ValueError: for an unknown *task_type*.
    """
    if task_type == "detection":
        available_geometry_type = Rectangle
        convertable_geometry_types = [Polygon, GraphNodes, Bitmap, Polyline, AlphaMask, AnyGeometry]
    elif task_type == "segmentation":
        available_geometry_type = Polygon
        convertable_geometry_types = [Bitmap, AlphaMask, AnyGeometry]
    elif task_type == "pose":
        available_geometry_type = GraphNodes
        # Keypoints cannot be synthesized from other geometries.
        convertable_geometry_types = []
    else:
        raise ValueError(
            f"Unsupported task type: {task_type}. "
            "Supported types: 'detection', 'segmentation', 'pose'"
        )

    if label.obj_class.geometry_type == available_geometry_type:
        return [label]

    if label.obj_class.geometry_type in convertable_geometry_types:
        new_obj_cls = label.obj_class.clone(geometry_type=available_geometry_type)
        return label.convert(new_obj_cls)

    if verbose:
        # Bug fix: the previous message formatted type(geometry_type), which
        # prints the metaclass, not the geometry class. Use the class name.
        logger.warning(
            f"Label '{label.obj_class.name}' has unsupported geometry type: "
            f"{label.obj_class.geometry_type.__name__}. Skipping."
        )
    return []
434
+
435
+
436
def label_to_yolo_lines(
    label: Label,
    img_height: int,
    img_width: int,
    class_names: List[str],
    task_type: Literal["detection", "segmentation", "pose"],
) -> List[str]:
    """
    Convert the Supervisely Label to a line in the YOLO format.

    The label's geometry is first converted to the task's required type
    (which may yield several labels); one YOLO line is emitted per result.
    """

    converted_labels = convert_label_geometry_if_needed(label, task_type)
    class_idx = class_names.index(label.obj_class.name)

    lines = []
    for converted in converted_labels:
        if task_type == "detection":
            yolo_line = rectangle_to_yolo_line(
                class_idx=class_idx,
                geometry=converted.geometry,
                img_height=img_height,
                img_width=img_width,
            )
        elif task_type == "segmentation":
            yolo_line = polygon_to_yolo_line(
                class_idx=class_idx,
                geometry=converted.geometry,
                img_height=img_height,
                img_width=img_width,
            )
        elif task_type == "pose":
            nodes_field = converted.obj_class.geometry_type.items_json_field
            max_kpts_count = len(converted.obj_class.geometry_config[nodes_field])
            yolo_line = keypoints_to_yolo_line(
                class_idx=class_idx,
                geometry=converted.geometry,
                img_height=img_height,
                img_width=img_width,
                max_kpts_count=max_kpts_count,
            )
        else:
            # Unreachable in practice: convert_label_geometry_if_needed already
            # rejects unknown task types.
            raise ValueError(f"Unsupported geometry type: {type(converted.obj_class.geometry_type)}")

        if yolo_line is not None:
            lines.append(yolo_line)

    return lines
483
+
484
+
485
def sly_ann_to_yolo(
    ann: Annotation,
    class_names: List[str],
    task_type: Literal["detection", "segmentation", "pose"] = "detection",
) -> List[str]:
    """
    Convert the Supervisely annotation to the YOLO format.

    Returns one YOLO line per exported label (labels whose geometry cannot
    be converted for the task contribute nothing).
    """

    height, width = ann.img_size
    yolo_lines: List[str] = []
    for lbl in ann.labels:
        yolo_lines.extend(
            label_to_yolo_lines(
                label=lbl,
                img_height=height,
                img_width=width,
                class_names=class_names,
                task_type=task_type,
            )
        )
    return yolo_lines
506
+
507
+
508
def sly_ds_to_yolo(
    dataset: Dataset,
    meta: ProjectMeta,
    dest_dir: Optional[str] = None,
    task_type: Literal["detection", "segmentation", "pose"] = "detection",
    log_progress: bool = False,
    progress_cb: Optional[Union[tqdm, Callable]] = None,
) -> str:
    """Convert a Supervisely dataset to the YOLO directory layout.

    Images are copied into ``images/{train,val}`` (split by the ``val`` image
    tag) and annotations are written to ``labels/{train,val}/<stem>.txt``.
    A ``data_config.yaml`` is created if one does not exist yet.

    :param dataset: dataset to convert.
    :param meta: project meta describing the dataset's classes.
    :param dest_dir: destination directory; defaults to ``<dataset.path>/yolo``.
    :param task_type: YOLO task to export for.
    :param log_progress: show a progress bar (ignored when *progress_cb* is given).
    :param progress_cb: callback receiving the number of processed items.
    :return: path to the destination directory as a string.
    """

    if progress_cb is not None:
        log_progress = False

    if log_progress:
        progress_cb = tqdm_sly(
            desc=f"Converting dataset '{dataset.short_name}' to YOLO format",
            total=len(dataset),
        ).update

    dest_dir = Path(dataset.path) / "yolo" if dest_dir is None else Path(dest_dir)
    dest_dir.mkdir(parents=True, exist_ok=True)

    # * create train and val directories
    train_images_dir = dest_dir / "images" / "train"
    train_labels_dir = dest_dir / "labels" / "train"
    val_images_dir = dest_dir / "images" / "val"
    val_labels_dir = dest_dir / "labels" / "val"
    for dir_path in [train_images_dir, train_labels_dir, val_images_dir, val_labels_dir]:
        dir_path.mkdir(parents=True, exist_ok=True)

    # * convert annotations and copy images
    class_names = [obj_class.name for obj_class in meta.obj_classes]
    # Track names across both splits so generate_free_name avoids collisions.
    used_names = set(os.listdir(train_images_dir)) | set(os.listdir(val_images_dir))
    for name in dataset.get_items_names():
        ann = Annotation.load_json_file(dataset.get_ann_path(name), meta)

        is_val = ann.img_tags.get("val")
        images_dir = val_images_dir if is_val else train_images_dir
        labels_dir = val_labels_dir if is_val else train_labels_dir

        img_path = Path(dataset.get_img_path(name))
        img_name = f"{dataset.short_name}_{get_file_name_with_ext(img_path)}"
        img_name = generate_free_name(used_names, img_name, with_ext=True, extend_used_names=True)
        shutil.copy2(img_path, images_dir / img_name)

        # Bug fix: YOLO pairs "images/.../foo.jpg" with "labels/.../foo.txt".
        # The previous code appended ".txt" to the full image name, producing
        # "foo.jpg.txt", which YOLO trainers never match to the image.
        label_path = str(labels_dir / f"{Path(img_name).stem}.txt")
        yolo_lines = ann.to_yolo(class_names, task_type)
        if len(yolo_lines) > 0:
            with open(label_path, "w") as f:
                f.write("\n".join(yolo_lines))
        else:
            # An empty label file marks an image with no objects (negative sample).
            touch(label_path)

        if progress_cb is not None:
            progress_cb(1)

    # * save data config file if it does not exist
    config_path = dest_dir / "data_config.yaml"
    if not config_path.exists():
        save_yolo_config(meta, dest_dir, with_keypoint=task_type == "pose")

    return str(dest_dir)
571
+
572
+
573
def save_yolo_config(meta: ProjectMeta, dest_dir: str, with_keypoint: bool = False):
    """Write ``data_config.yaml`` (YOLO data config) into *dest_dir*.

    The config lists the train/val image directories, class count, class
    names and colors; when *with_keypoint* is True and the meta contains
    GraphNodes classes, the maximum keypoint count is recorded as
    ``kpt_shape`` (``[K, 3]``).
    """
    dest_dir = Path(dest_dir)
    save_path = dest_dir / "data_config.yaml"

    class_names = [c.name for c in meta.obj_classes]
    data_yaml = {
        "train": f"../{str(dest_dir.name)}/images/train",
        "val": f"../{str(dest_dir.name)}/images/val",
        "nc": len(class_names),
        "names": class_names,
        "colors": [c.color for c in meta.obj_classes],
    }

    if with_keypoint and any(c.geometry_type == GraphNodes for c in meta.obj_classes):
        # Largest keypoint count across all GraphNodes classes.
        max_kpts_count = max(
            (
                len(obj_class.geometry_config[obj_class.geometry_type.items_json_field])
                for obj_class in meta.obj_classes
                if issubclass(obj_class.geometry_type, GraphNodes)
            ),
            default=0,
        )
        data_yaml["kpt_shape"] = [max_kpts_count, 3]

    with open(save_path, "w") as f:
        yaml.dump(data_yaml, f, default_flow_style=None)

    logger.info(f"Data config file has been saved to {str(save_path)}")
597
+
598
+
599
def sly_project_to_yolo(
    project: Union[Project, str],
    dest_dir: Optional[str] = None,
    task_type: Literal["detection", "segmentation", "pose"] = "detection",
    log_progress: bool = False,
    progress_cb: Optional[Callable] = None,
):
    """
    Convert Supervisely project to YOLO format.

    :param project: Project instance or path to a local project directory.
    :type project: :class:`Project` or :class:`str`
    :param dest_dir: Destination directory.
    :type dest_dir: :class:`str`, optional
    :param task_type: YOLO task to export for: "detection", "segmentation" or "pose".
    :type task_type: :class:`str`
    :param log_progress: Show uploading progress bar.
    :type log_progress: :class:`bool`
    :param progress_cb: Function for tracking conversion progress (for all items in the project).
    :type progress_cb: callable, optional
    :return: None
    :rtype: NoneType

    :Usage example:

     .. code-block:: python

        import supervisely as sly

        # Local folder with Project
        project_directory = "/home/admin/work/supervisely/source/project"

        # Convert Project to YOLO format
        sly.Project(project_directory).to_yolo(log_progress=True)
    """
    if isinstance(project, str):
        project = Project(project, mode=OpenMode.READ)

    dest_dir = Path(project.directory).parent / "yolo" if dest_dir is None else Path(dest_dir)
    dest_dir.mkdir(parents=True, exist_ok=True)

    # Refuse to mix the export with pre-existing files.
    if len(os.listdir(dest_dir)) > 0:
        raise FileExistsError(f"Directory {dest_dir} is not empty.")

    # An explicit callback takes precedence over the built-in progress bar.
    if progress_cb is not None:
        log_progress = False
    if log_progress:
        progress_cb = tqdm_sly(
            desc="Converting Supervisely project to YOLO format", total=project.total_items
        ).update

    save_yolo_config(project.meta, dest_dir, with_keypoint=task_type == "pose")

    for ds in project.datasets:
        ds: Dataset
        ds.to_yolo(
            meta=project.meta,
            dest_dir=dest_dir,
            task_type=task_type,
            log_progress=log_progress,
            progress_cb=progress_cb,
        )
        logger.info(f"Dataset '{ds.short_name}' has been converted to YOLO format.")
    logger.info(f"Project '{project.name}' has been converted to YOLO format.")
@@ -0,0 +1,8 @@
1
+ # Pointcloud
2
+ from supervisely.convert.pointcloud.sly.sly_pointcloud_converter import SLYPointcloudConverter
3
+ from supervisely.convert.pointcloud.las.las_converter import LasConverter
4
+ from supervisely.convert.pointcloud.ply.ply_converter import PlyConverter
5
+ from supervisely.convert.pointcloud.bag.bag_converter import BagConverter
6
+ from supervisely.convert.pointcloud.lyft.lyft_converter import LyftConverter
7
+ from supervisely.convert.pointcloud.nuscenes_conv.nuscenes_converter import NuscenesConverter
8
+ from supervisely.convert.pointcloud.kitti_3d.kitti_3d_converter import KITTI3DConverter
File without changes
@@ -0,0 +1,139 @@
1
+ import os
2
+ from pathlib import Path
3
+
4
+ from supervisely import PointcloudAnnotation, ProjectMeta, is_development, logger
5
+ from supervisely.api.api import Api, ApiField
6
+ from supervisely.convert.base_converter import AvailablePointcloudConverters
7
+ from supervisely.convert.pointcloud.kitti_3d import kitti_3d_helper
8
+ from supervisely.convert.pointcloud.pointcloud_converter import PointcloudConverter
9
+ from supervisely.io.fs import (
10
+ dirs_filter,
11
+ file_exists,
12
+ get_file_ext,
13
+ get_file_name,
14
+ get_file_name_with_ext,
15
+ list_files,
16
+ silent_remove,
17
+ )
18
+ from supervisely.pointcloud_annotation.pointcloud_object_collection import (
19
+ PointcloudObjectCollection,
20
+ )
21
+
22
+
23
class KITTI3DConverter(PointcloudConverter):
    """Converter for the KITTI 3D object-detection layout (velodyne/image_2/label_2/calib)."""

    def __str__(self) -> str:
        return AvailablePointcloudConverters.KITTI3D

    @property
    def key_file_ext(self) -> str:
        # Velodyne scans are raw float32 '.bin' files.
        return ".bin"

    def validate_format(self) -> bool:
        """Detect a KITTI 3D directory and collect complete items.

        A candidate directory must contain all folders from
        ``kitti_3d_helper.FOLDER_NAMES``. Each '.bin' scan is paired with
        its image/label/calib companions; incomplete items are skipped.
        Side effects: fills ``self._items`` and ``self._meta``.
        Returns True when at least one complete item was found.
        """

        def _is_bin_file(file_path):
            return get_file_ext(file_path).lower() == self.key_file_ext

        def _is_kitti_dir(path):
            return all((Path(path) / name).exists() for name in kitti_3d_helper.FOLDER_NAMES)

        candidate_dirs = list(dirs_filter(self._input_data, _is_kitti_dir))
        if not candidate_dirs:
            return False

        # NOTE(review): only the first matching directory is processed — confirm
        # multi-directory inputs are not expected here.
        root = candidate_dirs[0]
        velodyne_dir = os.path.join(root, "velodyne")
        image_2_dir = os.path.join(root, "image_2")
        label_2_dir = os.path.join(root, "label_2")
        calib_dir = os.path.join(root, "calib")

        self._items = []
        velodyne_files = list_files(velodyne_dir, filter_fn=_is_bin_file)
        if not velodyne_files:
            return False

        kitti_labels = []
        for velodyne_path in velodyne_files:
            stem = get_file_name(velodyne_path)
            image_path = os.path.join(image_2_dir, f"{stem}.png")
            label_path = os.path.join(label_2_dir, f"{stem}.txt")
            calib_path = os.path.join(calib_dir, f"{stem}.txt")
            # Skip scans missing any of the three companion files.
            if not file_exists(image_path):
                logger.debug(f"Skipping item: {velodyne_path}. Image not found.")
                continue
            if not file_exists(label_path):
                logger.debug(f"Skipping item: {velodyne_path}. Label not found.")
                continue
            if not file_exists(calib_path):
                logger.debug(f"Skipping item: {velodyne_path}. Calibration not found.")
                continue

            label = kitti_3d_helper.read_kitti_label(label_path, calib_path)
            kitti_labels.append(label)
            self._items.append(self.Item(velodyne_path, label, (image_path, calib_path)))

        self._meta = kitti_3d_helper.convert_labels_to_meta(kitti_labels)
        return self.items_count > 0

    def to_supervisely(
        self,
        item: PointcloudConverter.Item,
        meta: ProjectMeta,
        renamed_classes: dict = {},  # NOTE(review): mutable defaults; never mutated here
        renamed_tags: dict = {},
    ) -> PointcloudAnnotation:
        """Convert one item's KITTI label into a PointcloudAnnotation."""
        objs, figures = kitti_3d_helper.convert_label_to_annotation(
            item.ann_data, meta, renamed_classes
        )
        return PointcloudAnnotation(PointcloudObjectCollection(objs), figures)

    def upload_dataset(self, api: Api, dataset_id: int, batch_size: int = 1, log_progress=True):
        """Upload all collected items (pointcloud, annotation, related image) to *dataset_id*."""
        meta, renamed_classes, renamed_tags = self.merge_metas_with_conflicts(api, dataset_id)

        if log_progress:
            progress, progress_cb = self.get_progress(self.items_count, "Converting pointclouds...")
        else:
            progress_cb = None

        for item in self._items:
            # * Convert pointcloud from ".bin" to ".pcd"
            pcd_path = str(Path(item.path).with_suffix(".pcd"))
            if file_exists(pcd_path):
                logger.warning(f"Overwriting file with path: {pcd_path}")
            kitti_3d_helper.convert_bin_to_pcd(item.path, pcd_path)

            # * Upload pointcloud
            pcd_name = get_file_name_with_ext(pcd_path)
            pcd_id = api.pointcloud.upload_path(dataset_id, pcd_name, pcd_path, {}).id

            # * Convert annotation and upload
            ann = self.to_supervisely(item, meta, renamed_classes, renamed_tags)
            api.pointcloud.annotation.append(pcd_id, ann)

            # * Upload related images
            image_path, calib_path = item._related_images
            rimage_info = kitti_3d_helper.convert_calib_to_image_meta(image_path, calib_path)

            image_hash = api.pointcloud.upload_related_image(image_path)
            image_jsons = [
                {
                    ApiField.ENTITY_ID: pcd_id,
                    ApiField.NAME: get_file_name_with_ext(rimage_info[ApiField.NAME]),
                    ApiField.HASH: image_hash,
                    ApiField.META: rimage_info[ApiField.META],
                }
            ]
            camera_names = [rimage_info[ApiField.META]["deviceId"]]
            if len(image_jsons) > 0:
                api.pointcloud.add_related_images(image_jsons, camera_names)

            # * Clean up
            silent_remove(pcd_path)
            if log_progress:
                progress_cb(1)

        logger.info(f"Dataset ID:{dataset_id} has been successfully uploaded.")

        if log_progress:
            if is_development():
                progress.close()
@@ -0,0 +1,110 @@
1
+ import numpy as np
2
+
3
+ from supervisely import ObjClass, ObjClassCollection, ProjectMeta
4
+ from supervisely.geometry.cuboid_3d import Cuboid3d
5
+ from supervisely.geometry.point_3d import Vector3d
6
+ from supervisely.pointcloud_annotation.pointcloud_figure import PointcloudFigure
7
+ from supervisely.pointcloud_annotation.pointcloud_object import PointcloudObject
8
+
9
+ FOLDER_NAMES = ["velodyne", "image_2", "label_2", "calib"]
10
+
11
+
12
def read_kitti_label(label_path, calib_path):
    """Parse a KITTI label file (with its calibration) via Open3D-ML."""
    import open3d as o3d  # pylint: disable=import-error

    calib = o3d.ml.datasets.KITTI.read_calib(calib_path)
    return o3d.ml.datasets.KITTI.read_label(label_path, calib)
18
+
19
+
20
def convert_labels_to_meta(labels):
    """Build a ProjectMeta with one Cuboid3d ObjClass per unique KITTI class name."""
    unique_names = np.unique([obj.label_class for obj in flatten(labels)])
    obj_classes = ObjClassCollection([ObjClass(name, Cuboid3d) for name in unique_names])
    return ProjectMeta(obj_classes=obj_classes)
26
+
27
+
28
def convert_bin_to_pcd(src, dst):
    """Convert a KITTI '.bin' velodyne scan to a '.pcd' file at *dst*.

    The scan is read as float32 rows of (x, y, z, intensity); intensity is
    stored in the red channel of the point colors because the PCD written
    by Open3D carries no dedicated intensity field.

    :raises Exception: if the binary payload is not a multiple of 4 floats.
    """
    import open3d as o3d  # pylint: disable=import-error

    try:
        # Renamed from `bin`, which shadowed the builtin.
        scan = np.fromfile(src, dtype=np.float32).reshape(-1, 4)
    except ValueError as e:
        # Chain the original ValueError so the root cause stays visible.
        raise Exception(
            f"Incorrect data in the KITTI 3D pointcloud file: {src}. "
            f"There was an error while trying to reshape the data into a 4-column matrix: {e}. "
            "Please ensure that the binary file contains a multiple of 4 elements to be "
            "successfully reshaped into a (N, 4) array.\n"
        ) from e
    points = scan[:, 0:3]
    intensity = scan[:, -1]
    intensity_fake_rgb = np.zeros((intensity.shape[0], 3))
    intensity_fake_rgb[:, 0] = intensity
    pc = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))
    pc.colors = o3d.utility.Vector3dVector(intensity_fake_rgb)
    o3d.io.write_point_cloud(dst, pc)
47
+
48
+
49
def flatten(list_2d):
    """Flatten one nesting level of *list_2d* into a single list.

    Uses ``itertools.chain.from_iterable`` instead of ``sum(list_2d, [])``,
    which is quadratic in the total number of elements.
    """
    from itertools import chain

    return list(chain.from_iterable(list_2d))
51
+
52
+
53
def _convert_label_to_geometry(label):
    """Convert each KITTI object in *label* to a Supervisely Cuboid3d."""
    geometries = []
    for obj in label:
        bbox = obj.to_xyzwhlr()
        # Reorders the size components via indices [3, 5, 4]; presumably
        # (w, h, l) -> (w, l, h) — TODO confirm against Open3D-ML's BEVBox3D.
        dim = bbox[[3, 5, 4]]
        # Shifts the position up by half of dim[1]; presumably moving the
        # KITTI bottom-face anchor to the box center — verify with source data.
        pos = bbox[:3] + [0, 0, dim[1] / 2]
        yaw = bbox[-1]

        position = Vector3d(float(pos[0]), float(pos[1]), float(pos[2]))
        rotation = Vector3d(0, 0, float(-yaw))
        dimension = Vector3d(float(dim[0]), float(dim[2]), float(dim[1]))
        geometries.append(Cuboid3d(position, rotation, dimension))
    return geometries
67
+
68
+
69
def convert_label_to_annotation(label, meta, renamed_class_names: dict = None):
    """Convert a KITTI label to Supervisely pointcloud objects and figures.

    :param label: iterable of KITTI objects (one per object in the point cloud).
    :param meta: ProjectMeta used to resolve object classes by name.
    :param renamed_class_names: optional mapping from original to renamed class
        names; may be omitted or None.
    :return: tuple ``(objs, figures)``.
    """
    # Bug fix: the default None previously crashed on `.get` below whenever the
    # argument was omitted.
    if renamed_class_names is None:
        renamed_class_names = {}

    geometries = _convert_label_to_geometry(label)
    figures = []
    objs = []
    for obj, geometry in zip(label, geometries):  # by object in point cloud
        class_name = renamed_class_names.get(obj.label_class, obj.label_class)
        pcobj = PointcloudObject(meta.get_obj_class(class_name))
        figures.append(PointcloudFigure(pcobj, geometry))
        objs.append(pcobj)

    return objs, figures
80
+
81
+
82
def convert_calib_to_image_meta(image_name, calib_path, camera_num=2):
    """Build related-image meta (intrinsic/extrinsic matrices) from a KITTI calib file.

    :param image_name: stored under the "name" key of the result.
    :param calib_path: path to a KITTI-style calibration text file.
    :param camera_num: camera index (1..3); selects the projection-matrix line.
    :return: dict with "name" and "meta" (deviceId + sensorsData matrices).
    """
    with open(calib_path, "r") as f:
        lines = f.readlines()

    assert 0 < camera_num < 4
    # Line <camera_num> holds the camera projection matrix (3x4); its left
    # 3x3 block is taken as the intrinsic matrix.
    intrinsic_values = lines[camera_num].strip().split(" ")[1:]
    intrinsic_matrix = np.array(intrinsic_values, dtype=np.float32).reshape(3, 4)[:3, :3]

    # Line 4 (R0_rect in standard KITTI calib files) as a 4x4 homogeneous matrix.
    rect_values = lines[4].strip().split(" ")[1:]
    rect_4x4 = np.eye(4, dtype=np.float32)
    rect_4x4[:3, :3] = np.array(rect_values, dtype=np.float32).reshape(3, 3)

    # Line 5 (Tr_velo_to_cam) as a 4x4 homogeneous matrix.
    tr_values = lines[5].strip().split(" ")[1:]
    velo_to_cam_4x4 = np.eye(4, dtype=np.float32)
    velo_to_cam_4x4[:3] = np.array(tr_values, dtype=np.float32).reshape(3, 4)

    world_cam = np.transpose(rect_4x4 @ velo_to_cam_4x4)
    extrinsic_matrix = world_cam[:4, :3].T

    return {
        "name": image_name,
        "meta": {
            "deviceId": "CAM_LEFT",
            "sensorsData": {
                "extrinsicMatrix": list(extrinsic_matrix.flatten().astype(float)),
                "intrinsicMatrix": list(intrinsic_matrix.flatten().astype(float)),
            },
        },
    }
@@ -0,0 +1,9 @@
1
+ # Pointcloud Episodes
2
+ from supervisely.convert.pointcloud_episodes.sly.sly_pointcloud_episodes_converter import (
3
+ SLYPointcloudEpisodesConverter,
4
+ )
5
+ from supervisely.convert.pointcloud_episodes.bag.bag_converter import BagEpisodesConverter
6
+ from supervisely.convert.pointcloud_episodes.lyft.lyft_converter import LyftEpisodesConverter
7
+ from supervisely.convert.pointcloud_episodes.nuscenes_conv.nuscenes_converter import (
8
+ NuscenesEpisodesConverter,
9
+ )
@@ -0,0 +1,3 @@
1
+ # Video
2
+ from supervisely.convert.video.mot.mot_converter import MOTConverter
3
+ from supervisely.convert.video.sly.sly_video_converter import SLYVideoConverter
@@ -0,0 +1,3 @@
1
+ # Volume
2
+ from supervisely.convert.volume.sly.sly_volume_converter import SLYVolumeConverter
3
+ from supervisely.convert.volume.dicom.dicom_converter import DICOMConverter
@@ -102,7 +102,7 @@ class TrainingArtifacts:
102
102
  apps = api.app.get_list(
103
103
  team_id,
104
104
  filter=[{"field": "name", "operator": "=", "value": app_name}],
105
- only_running=True,
105
+ only_running=False,
106
106
  )
107
107
  if len(apps) == 1:
108
108
  app_info = apps[0]
@@ -1674,8 +1674,9 @@ class TrainApp:
1674
1674
  self.gui.training_artifacts.model_benchmark_report_thumbnail.show()
1675
1675
  self.gui.training_artifacts.model_benchmark_report_field.show()
1676
1676
  else:
1677
- self.gui.training_artifacts.model_benchmark_fail_text.show()
1678
- self.gui.training_artifacts.model_benchmark_report_field.show()
1677
+ if self.gui.hyperparameters_selector.get_model_benchmark_checkbox_value():
1678
+ self.gui.training_artifacts.model_benchmark_fail_text.show()
1679
+ self.gui.training_artifacts.model_benchmark_report_field.show()
1679
1680
  # ---------------------------- #
1680
1681
 
1681
1682
  # Set instruction to GUI