supervisely 6.73.321__py3-none-any.whl → 6.73.323__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/annotation/annotation.py +1 -1
 - supervisely/convert/base_converter.py +2 -0
 - supervisely/convert/pointcloud_episodes/__init__.py +1 -0
 - supervisely/convert/pointcloud_episodes/kitti_360/__init__.py +0 -0
 - supervisely/convert/pointcloud_episodes/kitti_360/kitti_360_converter.py +242 -0
 - supervisely/convert/pointcloud_episodes/kitti_360/kitti_360_helper.py +386 -0
 - supervisely/convert/volume/__init__.py +1 -0
 - supervisely/convert/volume/nii/__init__.py +0 -0
 - supervisely/convert/volume/nii/nii_volume_converter.py +151 -0
 - supervisely/convert/volume/nii/nii_volume_helper.py +38 -0
 - supervisely/nn/inference/inference.py +155 -1
 - supervisely/volume/volume.py +43 -0
 - {supervisely-6.73.321.dist-info → supervisely-6.73.323.dist-info}/METADATA +1 -1
 - {supervisely-6.73.321.dist-info → supervisely-6.73.323.dist-info}/RECORD +18 -12
 - {supervisely-6.73.321.dist-info → supervisely-6.73.323.dist-info}/LICENSE +0 -0
 - {supervisely-6.73.321.dist-info → supervisely-6.73.323.dist-info}/WHEEL +0 -0
 - {supervisely-6.73.321.dist-info → supervisely-6.73.323.dist-info}/entry_points.txt +0 -0
 - {supervisely-6.73.321.dist-info → supervisely-6.73.323.dist-info}/top_level.txt +0 -0
 
supervisely/annotation/annotation.py

@@ -404,7 +404,7 @@ class Annotation:
                     f"Failed to deserialize one of the label from JSON format annotation: \n{repr(e)}"
                 )
 
-        custom_data = data.get(AnnotationJsonFields.CUSTOM_DATA, {})
+        custom_data = data.get(AnnotationJsonFields.CUSTOM_DATA, {}) or {}
         prob_labels = None
         if (
             AnnotationJsonFields.PROBABILITY_LABELS in custom_data
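A minimal sketch (not part of the package) of the case the one-line change above guards against: dict.get returns the stored value even when that value is None, so an annotation JSON whose custom-data field is explicitly null would previously reach the PROBABILITY_LABELS membership check as None; the key name below is a stand-in for AnnotationJsonFields.CUSTOM_DATA.

data = {"custom_data": None}               # field present but null
data.get("custom_data", {})                # -> None; "... in None" raises TypeError
data.get("custom_data", {}) or {}          # -> {}; the membership check stays safe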
supervisely/convert/base_converter.py

@@ -65,11 +65,13 @@ class AvailablePointcloudEpisodesConverters:
     SLY = "supervisely"
     BAG = "rosbag"
     LYFT = "lyft"
+    KITTI360 = "kitti360"
 
 
 class AvailableVolumeConverters:
     SLY = "supervisely"
     DICOM = "dicom"
+    NII = "nii"
 
 
 class BaseConverter:
supervisely/convert/pointcloud_episodes/__init__.py

@@ -7,3 +7,4 @@ from supervisely.convert.pointcloud_episodes.lyft.lyft_converter import LyftEpis
 from supervisely.convert.pointcloud_episodes.nuscenes_conv.nuscenes_converter import (
     NuscenesEpisodesConverter,
 )
+from supervisely.convert.pointcloud_episodes.kitti_360.kitti_360_converter import KITTI360Converter
         
supervisely/convert/pointcloud_episodes/kitti_360/__init__.py

File without changes
supervisely/convert/pointcloud_episodes/kitti_360/kitti_360_converter.py

@@ -0,0 +1,242 @@
+import os
+from pathlib import Path
+from typing import Optional, List
+from supervisely import PointcloudEpisodeAnnotation, ProjectMeta, is_development, logger, ObjClass, ObjClassCollection
+from supervisely.geometry.cuboid_3d import Cuboid3d
+from supervisely.api.api import Api, ApiField
+from supervisely.convert.base_converter import AvailablePointcloudEpisodesConverters
+from supervisely.convert.pointcloud_episodes.kitti_360.kitti_360_helper import *
+from supervisely.convert.pointcloud_episodes.pointcloud_episodes_converter import PointcloudEpisodeConverter
+from supervisely.io.fs import (
+    file_exists,
+    get_file_name,
+    get_file_name_with_ext,
+    list_files_recursively,
+    silent_remove,
+)
+from supervisely.pointcloud_annotation.pointcloud_episode_frame_collection import PointcloudEpisodeFrameCollection
+from supervisely.pointcloud_annotation.pointcloud_episode_object_collection import PointcloudEpisodeObjectCollection
+from supervisely.pointcloud_annotation.pointcloud_episode_object import PointcloudEpisodeObject
+from supervisely.pointcloud_annotation.pointcloud_episode_frame import PointcloudEpisodeFrame
+from supervisely.pointcloud_annotation.pointcloud_figure import PointcloudFigure
+
+class KITTI360Converter(PointcloudEpisodeConverter):
+
+    class Item:
+
+        def __init__(
+            self,
+            scene_name: str,
+            frame_paths: List[str],
+            ann_data: Annotation3D,
+            poses_path: str,
+            related_images: Optional[tuple] = None,
+            custom_data: Optional[dict] = None,
+        ):
+            self._scene_name = scene_name
+            self._frame_paths = frame_paths
+            self._ann_data = ann_data
+            self._poses_path = poses_path
+            self._related_images = related_images or []
+
+            self._type = "point_cloud_episode"
+            self._custom_data = custom_data if custom_data is not None else {}
+
+    def __init__(self, *args, **kwargs):
+        self._calib_path = None
+        super().__init__(*args, **kwargs)
+
+    def __str__(self) -> str:
+        return AvailablePointcloudEpisodesConverters.KITTI360
+
+    @property
+    def key_file_ext(self) -> str:
+        return ".bin"
+
+    def validate_format(self) -> bool:
+        try:
+            import kitti360scripts
+        except ImportError:
+            logger.warn("Please run 'pip install kitti360Scripts' to import KITTI-360 data.")
+            return False
+
+        self._items = []
+        subdirs = os.listdir(self._input_data)
+        if len(subdirs) == 1:
+            self._input_data = os.path.join(self._input_data, subdirs[0])
+
+        # * Get calibration path
+        calib_dir = next(iter([(Path(path).parent).as_posix() for path in list_files_recursively(self._input_data, [".txt"], None, True) if Path(path).stem.startswith("calib")]), None)
+        if calib_dir is None:
+            return False
+        self._calib_path = calib_dir
+
+        # * Get pointcloud files paths
+        velodyne_files = list_files_recursively(self._input_data, [".bin"], None, True)
+        if len(velodyne_files) == 0:
+            return False
+
+        # * Get annotation files paths and related images
+        boxes_ann_files = list_files_recursively(self._input_data, [".xml"], None, True)
+        if len(boxes_ann_files) == 0:
+            return False
+        rimage_files = list_files_recursively(self._input_data, [".png"], None, True)
+
+        kitti_anns = []
+        for ann_file in boxes_ann_files:
+            key_name = Path(ann_file).stem
+
+            # * Get pointcloud files
+            frame_paths = []
+            for path in velodyne_files:
+                if key_name in Path(path).parts:
+                    frame_paths.append(path)
+            if len(frame_paths) == 0:
+                logger.warn("No frames found for name: %s", key_name)
+                continue
+
+            # * Get related images
+            rimages = []
+            for rimage in rimage_files:
+                path = Path(rimage)
+                if key_name in path.parts:
+                    cam_name = path.parts[-3]
+                    rimages.append((cam_name, rimage))
+
+            # * Get poses
+            poses_filter = (
+                lambda x: x.endswith("cam0_to_world.txt") and key_name in Path(x).parts
+            )
+            poses_path = next(
+                path
+                for path in list_files_recursively(self._input_data, [".txt"], None, True)
+                if poses_filter(path)
+            )
+            if poses_path is None:
+                logger.warn("No poses found for name: %s", key_name)
+                continue
+
+            # * Parse annotation
+            ann = Annotation3D(ann_file)
+            kitti_anns.append(ann)
+
+            self._items.append(
+                self.Item(key_name, frame_paths, ann, poses_path, rimages)
+            )
+
+        # * Get object class names for meta
+        obj_class_names = set()
+        for ann in kitti_anns:
+            for obj in ann.get_objects():
+                obj_class_names.add(obj.name)
+        obj_classes = [ObjClass(obj_class, Cuboid3d) for obj_class in obj_class_names]
+        self._meta = ProjectMeta(obj_classes=ObjClassCollection(obj_classes))
+        return self.items_count > 0
+
+    def to_supervisely(
+        self,
+        item,
+        meta: ProjectMeta,
+        renamed_classes: dict = {},
+        renamed_tags: dict = {},
+        static_transformations: StaticTransformations = None,
+    ) -> PointcloudEpisodeAnnotation:
+        static_transformations.set_cam2world(item._poses_path)
+
+        frame_cnt = len(item._frame_paths)
+        objs, frames = [], []
+
+        frame_idx_to_figures = {idx: [] for idx in range(frame_cnt)}
+        for obj in item._ann_data.get_objects():
+            pcd_obj = PointcloudEpisodeObject(meta.get_obj_class(obj.name))
+            objs.append(pcd_obj)
+
+            for idx in range(frame_cnt):
+                if obj.start_frame <= idx <= obj.end_frame:
+                    tr_matrix = static_transformations.world_to_velo_transformation(obj, idx)
+                    geom = convert_kitti_cuboid_to_supervisely_geometry(tr_matrix)
+                    frame_idx_to_figures[idx].append(PointcloudFigure(pcd_obj, geom, idx))
+        for idx, figures in frame_idx_to_figures.items():
+            frame = PointcloudEpisodeFrame(idx, figures)
+            frames.append(frame)
+        obj_collection = PointcloudEpisodeObjectCollection(objs)
+        frame_collection = PointcloudEpisodeFrameCollection(frames)
+        return PointcloudEpisodeAnnotation(
+            frame_cnt, objects=obj_collection, frames=frame_collection
+        )
+
+    def upload_dataset(self, api: Api, dataset_id: int, batch_size: int = 1, log_progress=True):
+        meta, renamed_classes, renamed_tags = self.merge_metas_with_conflicts(api, dataset_id)
+
+        dataset_info = api.dataset.get_info_by_id(dataset_id)
+        if log_progress:
+            progress, progress_cb = self.get_progress(sum([len(item._frame_paths) for item in self._items]), "Converting pointcloud episodes...")
+        else:
+            progress_cb = None
+        static_transformations = StaticTransformations(self._calib_path)
+        scene_ds = dataset_info
+        multiple_items = self.items_count > 1
+        for item in self._items:
+            scene_ds = api.dataset.create(dataset_info.project_id, item._scene_name, parent_id=dataset_id) if multiple_items else dataset_info
+            frame_to_pcd_ids = {}
+            for idx, frame_path in enumerate(item._frame_paths):
+                # * Convert pointcloud from ".bin" to ".pcd"
+                pcd_path = str(Path(frame_path).with_suffix(".pcd"))
+                if file_exists(pcd_path):
+                    logger.warning(f"Overwriting file with path: {pcd_path}")
+                convert_bin_to_pcd(frame_path, pcd_path)
+
+                # * Upload pointcloud
+                pcd_name = get_file_name_with_ext(pcd_path)
+                info = api.pointcloud_episode.upload_path(scene_ds.id, pcd_name, pcd_path, {"frame": idx})
+                pcd_id = info.id
+                frame_to_pcd_ids[idx] = pcd_id
+
+                # * Clean up
+                silent_remove(pcd_path)
+
+                if log_progress:
+                    progress_cb(1)
+
+            # * Upload photocontext
+            rimage_jsons = []
+            cam_names = []
+            hashes = api.pointcloud_episode.upload_related_images(
+                [rimage_path for _, rimage_path in item._related_images]
+            )
+            for (cam_name, rimage_path), img, pcd_id in zip(
+                item._related_images, hashes, list(frame_to_pcd_ids.values())
+            ):
+                cam_num = int(cam_name[-1])
+                rimage_info = convert_calib_to_image_meta(
+                    get_file_name(rimage_path), static_transformations, cam_num
+                )
+                image_json = {
+                    ApiField.ENTITY_ID: pcd_id,
+                    ApiField.NAME: cam_name,
+                    ApiField.HASH: img,
+                    ApiField.META: rimage_info[ApiField.META],
+                }
+                rimage_jsons.append(image_json)
+                cam_names.append(cam_name)
+            if rimage_jsons:
+                api.pointcloud_episode.add_related_images(rimage_jsons, cam_names)
+
+            # * Convert annotation and upload
+            try:
+                ann = self.to_supervisely(
+                    item, meta, renamed_classes, renamed_tags, static_transformations
+                )
+                api.pointcloud_episode.annotation.append(scene_ds.id, ann, frame_to_pcd_ids)
+            except Exception as e:
+                logger.error(
+                    f"Failed to upload annotation for scene: {scene_ds.name}. Error: {repr(e)}",
+                    stack_info=False,
+                )
+                continue
+
+            logger.info(f"Dataset ID:{scene_ds.id} has been successfully uploaded.")
+
+        if log_progress:
+            if is_development():
+                progress.close()
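A hypothetical usage sketch for the new converter, not taken from the package documentation: only validate_format() and upload_dataset(api, dataset_id) appear in the diff above; the constructor argument (the extracted KITTI-360 directory) is an assumption about the inherited PointcloudEpisodeConverter, and the dataset id is a placeholder.

import supervisely as sly
from supervisely.convert.pointcloud_episodes.kitti_360.kitti_360_converter import KITTI360Converter

api = sly.Api.from_env()
converter = KITTI360Converter("/data/kitti_360")   # assumption: base class accepts the input directory
if converter.validate_format():                    # scans for calib*.txt, *.bin clouds, *.xml boxes, cam0_to_world.txt poses
    converter.upload_dataset(api, dataset_id=123)  # placeholder id; converts .bin -> .pcd and uploads frames, photo context, cuboids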
supervisely/convert/pointcloud_episodes/kitti_360/kitti_360_helper.py

@@ -0,0 +1,386 @@
+from supervisely import logger
+from supervisely.io.fs import get_file_name
+from supervisely.geometry.cuboid_3d import Cuboid3d
+from supervisely.geometry.point_3d import Vector3d
+from supervisely.geometry.point import Point
+
+from collections import defaultdict
+import os
+import numpy as np
+
+
+MAX_N = 1000
+
+
+def local2global(semanticId, instanceId):
+    globalId = semanticId * MAX_N + instanceId
+    if isinstance(globalId, np.ndarray):
+        return globalId.astype(np.int)
+    else:
+        return int(globalId)
+
+
+def global2local(globalId):
+    semanticId = globalId // MAX_N
+    instanceId = globalId % MAX_N
+    if isinstance(globalId, np.ndarray):
+        return semanticId.astype(int), instanceId.astype(int)
+    else:
+        return int(semanticId), int(instanceId)
+
+
+annotation2global = defaultdict()
+
+
+# Abstract base class for annotation objects
+class KITTI360Object:
+    from abc import ABCMeta
+
+    __metaclass__ = ABCMeta
+
+    def __init__(self):
+        from matplotlib import cm
+
+        # the label
+        self.label = ""
+
+        # colormap
+        self.cmap = cm.get_cmap("Set1")
+        self.cmap_length = 9
+
+    def getColor(self, idx):
+        if idx == 0:
+            return np.array([0, 0, 0])
+        return np.asarray(self.cmap(idx % self.cmap_length)[:3]) * 255.0
+
+    # def assignColor(self):
+    #     from kitti360scripts.helpers.labels import id2label  # pylint: disable=import-error
+
+    #     if self.semanticId >= 0:
+    #         self.semanticColor = id2label[self.semanticId].color
+    #         if self.instanceId > 0:
+    #             self.instanceColor = self.getColor(self.instanceId)
+    #         else:
+    #             self.instanceColor = self.semanticColor
+
+
+# Class that contains the information of a single annotated object as 3D bounding box
+class KITTI360Bbox3D(KITTI360Object):
+    # Constructor
+    def __init__(self):
+        KITTI360Object.__init__(self)
+        # the polygon as list of points
+        self.vertices = []
+        self.faces = []
+        self.lines = [
+            [0, 5],
+            [1, 4],
+            [2, 7],
+            [3, 6],
+            [0, 1],
+            [1, 3],
+            [3, 2],
+            [2, 0],
+            [4, 5],
+            [5, 7],
+            [7, 6],
+            [6, 4],
+        ]
+
+        # the ID of the corresponding object
+        self.semanticId = -1
+        self.instanceId = -1
+        self.annotationId = -1
+
+        # the window that contains the bbox
+        self.start_frame = -1
+        self.end_frame = -1
+
+        # timestamp of the bbox (-1 if statis)
+        self.timestamp = -1
+
+        # projected vertices
+        self.vertices_proj = None
+        self.meshes = []
+
+        # name
+        self.name = ""
+
+    def __str__(self):
+        return self.name
+
+    # def generateMeshes(self):
+    #     self.meshes = []
+    #     if self.vertices_proj:
+    #         for fidx in range(self.faces.shape[0]):
+    #             self.meshes.append(
+    #                 [
+    #                     Point(self.vertices_proj[0][int(x)], self.vertices_proj[1][int(x)])
+    #                     for x in self.faces[fidx]
+    #                 ]
+    #             )
+
+    def parseOpencvMatrix(self, node):
+        rows = int(node.find("rows").text)
+        cols = int(node.find("cols").text)
+        data = node.find("data").text.split(" ")
+
+        mat = []
+        for d in data:
+            d = d.replace("\n", "")
+            if len(d) < 1:
+                continue
+            mat.append(float(d))
+        mat = np.reshape(mat, [rows, cols])
+        return mat
+
+    def parseVertices(self, child):
+        transform = self.parseOpencvMatrix(child.find("transform"))
+        R = transform[:3, :3]
+        T = transform[:3, 3]
+        vertices = self.parseOpencvMatrix(child.find("vertices"))
+        faces = self.parseOpencvMatrix(child.find("faces"))
+
+        vertices = np.matmul(R, vertices.transpose()).transpose() + T
+        self.vertices = vertices
+        self.faces = faces
+        self.R = R
+        self.T = T
+
+        self.transform = transform
+
+    def parseBbox(self, child):
+        from kitti360scripts.helpers.labels import kittiId2label  # pylint: disable=import-error
+
+        semanticIdKITTI = int(child.find("semanticId").text)
+        self.semanticId = kittiId2label[semanticIdKITTI].id
+        self.instanceId = int(child.find("instanceId").text)
+        # self.name = str(child.find('label').text)
+        self.name = kittiId2label[semanticIdKITTI].name
+
+        self.start_frame = int(child.find("start_frame").text)
+        self.end_frame = int(child.find("end_frame").text)
+
+        self.timestamp = int(child.find("timestamp").text)
+
+        self.annotationId = int(child.find("index").text) + 1
+
+        global annotation2global
+        annotation2global[self.annotationId] = local2global(self.semanticId, self.instanceId)
+        self.parseVertices(child)
+
+    def parseStuff(self, child):
+        from kitti360scripts.helpers.labels import name2label  # pylint: disable=import-error
+
+        classmap = {
+            "driveway": "parking",
+            "ground": "terrain",
+            "unknownGround": "ground",
+            "railtrack": "rail track",
+        }
+        label = child.find("label").text
+        if label in classmap.keys():
+            label = classmap[label]
+
+        self.start_frame = int(child.find("start_frame").text)
+        self.end_frame = int(child.find("end_frame").text)
+
+        self.semanticId = name2label[label].id
+        self.instanceId = 0
+        self.parseVertices(child)
+
+
+# Class that contains the information of the point cloud a single frame
+class KITTI360Point3D(KITTI360Object):
+    # Constructor
+    def __init__(self):
+        KITTI360Object.__init__(self)
+
+        self.vertices = []
+
+        self.vertices_proj = None
+
+        # the ID of the corresponding object
+        self.semanticId = -1
+        self.instanceId = -1
+        self.annotationId = -1
+
+        # name
+        self.name = ""
+
+        # color
+        self.semanticColor = None
+        self.instanceColor = None
+
+    def __str__(self):
+        return self.name
+
+    # def generateMeshes(self):
+    #     pass
+
+
+# Meta class for KITTI360Bbox3D
+class Annotation3D:
+    def __init__(self, labelPath):
+        from kitti360scripts.helpers.labels import labels  # pylint: disable=import-error
+        import xml.etree.ElementTree as ET
+
+        key_name = get_file_name(labelPath)
+        # load annotation
+        tree = ET.parse(labelPath)
+        root = tree.getroot()
+
+        self.objects = defaultdict(dict)
+
+        self.num_bbox = 0
+        for child in root:
+            if child.find("transform") is None:
+                continue
+            obj = KITTI360Bbox3D()
+            obj.parseBbox(child)
         
     | 
| 
      
 241 
     | 
    
         
            +
                        globalId = local2global(obj.semanticId, obj.instanceId)
         
     | 
| 
      
 242 
     | 
    
         
            +
                        self.objects[globalId][obj.timestamp] = obj
         
     | 
| 
      
 243 
     | 
    
         
            +
                        self.num_bbox += 1
         
     | 
| 
      
 244 
     | 
    
         
            +
             
     | 
| 
      
 245 
     | 
    
         
            +
                    globalIds = np.asarray(list(self.objects.keys()))
         
     | 
| 
      
 246 
     | 
    
         
            +
                    semanticIds, instanceIds = global2local(globalIds)
         
     | 
| 
      
 247 
     | 
    
         
            +
                    for label in labels:
         
     | 
| 
      
 248 
     | 
    
         
            +
                        if label.hasInstances:
         
     | 
| 
      
 249 
     | 
    
         
            +
                            print(f"{label.name:<30}:\t {(semanticIds==label.id).sum()}")
         
     | 
| 
      
 250 
     | 
    
         
            +
                    print(f"Loaded {len(globalIds)} instances")
         
     | 
| 
      
 251 
     | 
    
         
            +
                    print(f"Loaded {self.num_bbox} boxes")
         
     | 
| 
      
 252 
     | 
    
         
            +
             
     | 
| 
      
 253 
     | 
    
         
            +
                def __call__(self, semanticId, instanceId, timestamp=None):
         
     | 
| 
      
 254 
     | 
    
         
            +
                    globalId = local2global(semanticId, instanceId)
         
     | 
| 
      
 255 
     | 
    
         
            +
                    if globalId in self.objects.keys():
         
     | 
| 
      
 256 
     | 
    
         
            +
                        # static object
         
     | 
| 
      
 257 
     | 
    
         
            +
                        if len(self.objects[globalId].keys()) == 1:
         
     | 
| 
      
 258 
     | 
    
         
            +
                            if -1 in self.objects[globalId].keys():
         
     | 
| 
      
 259 
     | 
    
         
            +
                                return self.objects[globalId][-1]
         
     | 
| 
      
 260 
     | 
    
         
            +
                            else:
         
     | 
| 
      
 261 
     | 
    
         
            +
                                return None
         
     | 
| 
      
 262 
     | 
    
         
            +
                        # dynamic object
         
     | 
| 
      
 263 
     | 
    
         
            +
                        else:
         
     | 
| 
      
 264 
     | 
    
         
            +
                            return self.objects[globalId][timestamp]
         
     | 
| 
      
 265 
     | 
    
         
            +
                    else:
         
     | 
| 
      
 266 
     | 
    
         
            +
                        return None
         
     | 
| 
      
 267 
     | 
    
         
            +
             
     | 
| 
      
 268 
     | 
    
         
            +
                def get_objects(self):
         
     | 
| 
      
 269 
     | 
    
         
            +
                    return [list(obj.values())[0] for obj in self.objects.values()]
         
     | 
| 
      
 270 
     | 
    
         
            +
             
     | 
| 
      
 271 
     | 
    
         
            +
            class StaticTransformations:
         
     | 
| 
      
 272 
     | 
    
         
            +
                def __init__(self, calibrations_path):
         
     | 
| 
      
 273 
     | 
    
         
            +
                    import kitti360scripts.devkits.commons.loadCalibration as lc  # pylint: disable=import-error
         
     | 
| 
      
 274 
     | 
    
         
            +
             
     | 
| 
      
 275 
     | 
    
         
            +
                    cam2velo_path = os.path.join(calibrations_path, "calib_cam_to_velo.txt")
         
     | 
| 
      
 276 
     | 
    
         
            +
                    self.cam2velo = lc.loadCalibrationRigid(cam2velo_path)
         
     | 
| 
      
 277 
     | 
    
         
            +
                    perspective_path = os.path.join(calibrations_path, "perspective.txt")
         
     | 
| 
      
 278 
     | 
    
         
            +
                    self.intrinsic_calibrations = lc.loadPerspectiveIntrinsic(perspective_path)
         
     | 
| 
      
 279 
     | 
    
         
            +
                    self.cam2world = None
         
     | 
| 
      
 280 
     | 
    
         
            +
             
     | 
| 
      
 281 
     | 
    
         
            +
                def set_cam2world(self, cam2world_path):
         
     | 
| 
      
 282 
     | 
    
         
            +
                    if not os.path.isfile(cam2world_path):
         
     | 
| 
      
 283 
     | 
    
         
            +
                        logger.warn("Camera to world calibration file was not found")
         
     | 
| 
      
 284 
     | 
    
         
            +
                        return
         
     | 
| 
      
 285 
     | 
    
         
            +
             
     | 
| 
      
 286 
     | 
    
         
            +
                    cam2world_rows = np.loadtxt(cam2world_path)
         
     | 
| 
      
 287 
     | 
    
         
            +
                    cam2world_rigid = np.reshape(cam2world_rows[:, 1:], (-1, 4, 4))
         
     | 
| 
      
 288 
     | 
    
         
            +
                    frames_numbers = list(np.reshape(cam2world_rows[:, :1], (-1)).astype(int))
         
     | 
| 
      
 289 
     | 
    
         
            +
                    cam2world = {}
         
     | 
| 
      
 290 
     | 
    
         
            +
             
     | 
| 
      
 291 
     | 
    
         
            +
                    current_rigid = cam2world_rigid[0]
         
     | 
| 
      
 292 
     | 
    
         
            +
             
     | 
| 
      
 293 
     | 
    
         
            +
                    for frame_index in range(0, frames_numbers[-1]):
         
     | 
| 
      
 294 
     | 
    
         
            +
                        if frame_index in frames_numbers:
         
     | 
| 
      
 295 
     | 
    
         
            +
                            mapped_index = frames_numbers.index(frame_index)
         
     | 
| 
      
 296 
     | 
    
         
            +
                            current_rigid = cam2world_rigid[mapped_index]
         
     | 
| 
      
 297 
     | 
    
         
            +
             
     | 
| 
      
 298 
     | 
    
         
            +
                        # (Tr(cam -> world))
         
     | 
| 
      
 299 
     | 
    
         
            +
                        cam2world[frame_index] = current_rigid
         
     | 
| 
      
 300 
     | 
    
         
            +
                    self.cam2world = cam2world
         
     | 
| 
      
 301 
     | 
    
         
            +
             
     | 
| 
      
 302 
     | 
    
         
            +
                def world_to_velo_transformation(self, obj, frame_index):
         
     | 
| 
      
 303 
     | 
    
         
            +
                    # rotate_z = Rotation.from_rotvec(np.pi * np.array([0, 0, 1])).as_matrix()
         
     | 
| 
      
 304 
     | 
    
         
            +
                    # rotate_z = np.hstack((rotate_z, np.asarray([[0, 0, 0]]).T))
         
     | 
| 
      
 305 
     | 
    
         
            +
             
     | 
| 
      
 306 
     | 
    
         
            +
                    # tr0(local -> fixed_coordinates_local)
         
     | 
| 
      
 307 
     | 
    
         
            +
                    tr0 = np.asarray([[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
         
     | 
| 
      
 308 
     | 
    
         
            +
             
     | 
| 
      
 309 
     | 
    
         
            +
                    # tr0(fixed_coordinates_local -> world)
         
     | 
| 
      
 310 
     | 
    
         
            +
                    tr1 = obj.transform
         
     | 
| 
      
 311 
     | 
    
         
            +
             
     | 
| 
      
 312 
     | 
    
         
            +
                    # tr2(world -> cam)
         
     | 
| 
      
 313 
     | 
    
         
            +
                    tr2 = np.linalg.inv(self.cam2world[frame_index])
         
     | 
| 
      
 314 
     | 
    
         
            +
             
     | 
| 
      
 315 
     | 
    
         
            +
                    # tr3(world -> cam)
         
     | 
| 
      
 316 
     | 
    
         
            +
                    tr3 = self.cam2velo
         
     | 
| 
      
 317 
     | 
    
         
            +
             
     | 
| 
      
 318 
     | 
    
         
            +
                    return tr3 @ tr2 @ tr1 @ tr0
         
     | 
| 
      
 319 
     | 
    
         
            +
             
     | 
| 
      
 320 
     | 
    
         
            +
                def get_extrinsic_matrix(self):
         
     | 
| 
      
 321 
     | 
    
         
            +
                    return np.linalg.inv(self.cam2velo)[:3, :4]
         
     | 
| 
      
 322 
     | 
    
         
            +
             
     | 
| 
      
 323 
     | 
    
         
            +
                def get_intrinsics_matrix(self, camera_num):
         
     | 
| 
      
 324 
     | 
    
         
            +
                    try:
         
     | 
| 
      
 325 
     | 
    
         
            +
                        matrix = self.intrinsic_calibrations[f"P_rect_0{camera_num}"][:3, :3]
         
     | 
| 
      
 326 
     | 
    
         
            +
                        return matrix
         
     | 
| 
      
 327 
     | 
    
         
            +
                    except KeyError:
         
     | 
| 
      
 328 
     | 
    
         
            +
                        logger.warn(f"Camera {camera_num} intrinsic matrix was not found")
         
     | 
| 
      
 329 
     | 
    
         
            +
                    return
         
     | 
| 
      
 330 
     | 
    
         
            +
             
     | 
| 
      
 331 
     | 
    
         
            +
            def convert_kitti_cuboid_to_supervisely_geometry(tr_matrix):
         
     | 
| 
      
 332 
     | 
    
         
            +
                import transforms3d  # pylint: disable=import-error
         
     | 
| 
      
 333 
     | 
    
         
            +
                from scipy.spatial.transform.rotation import Rotation
         
     | 
| 
      
 334 
     | 
    
         
            +
             
     | 
| 
      
 335 
     | 
    
         
            +
                Tdash, Rdash, Zdash, _ = transforms3d.affines.decompose44(tr_matrix)
         
     | 
| 
      
 336 
     | 
    
         
            +
             
     | 
| 
      
 337 
     | 
    
         
            +
                x, y, z = Tdash[0], Tdash[1], Tdash[2]
         
     | 
| 
      
 338 
     | 
    
         
            +
                position = Vector3d(x, y, z)
         
     | 
| 
      
 339 
     | 
    
         
            +
             
     | 
| 
      
 340 
     | 
    
         
            +
                rotation_angles = Rotation.from_matrix(Rdash).as_euler("xyz", degrees=False)
         
     | 
| 
      
 341 
     | 
    
         
            +
                r_x, r_y, r_z = rotation_angles[0], rotation_angles[1], rotation_angles[2]
         
     | 
| 
      
 342 
     | 
    
         
            +
             
     | 
| 
      
 343 
     | 
    
         
            +
                # Invert the bbox by adding π to the yaw while maintaining its degree relative to the world
         
     | 
| 
      
 344 
     | 
    
         
            +
                rotation = Vector3d(r_x, r_y, r_z + np.pi)
         
     | 
| 
      
 345 
     | 
    
         
            +
             
     | 
| 
      
 346 
     | 
    
         
            +
                w, h, l = Zdash[0], Zdash[1], Zdash[2]
         
     | 
| 
      
 347 
     | 
    
         
            +
                dimension = Vector3d(w, h, l)
         
     | 
| 
      
 348 
     | 
    
         
            +
             
     | 
| 
      
 349 
     | 
    
         
            +
                return Cuboid3d(position, rotation, dimension)
         
     | 
| 
      
 350 
     | 
    
         
            +
             
     | 
| 
      
 351 
     | 
    
         
            +
            def convert_bin_to_pcd(src, dst):
         
     | 
| 
      
 352 
     | 
    
         
            +
                import open3d as o3d  # pylint: disable=import-error
         
     | 
| 
      
 353 
     | 
    
         
            +
             
     | 
| 
      
 354 
     | 
    
         
            +
                try:
         
     | 
| 
      
 355 
     | 
    
         
            +
                    bin = np.fromfile(src, dtype=np.float32).reshape(-1, 4)
         
     | 
| 
      
 356 
     | 
    
         
            +
                except ValueError as e:
         
     | 
| 
      
 357 
     | 
    
         
            +
                    raise Exception(
         
     | 
| 
      
 358 
     | 
    
         
            +
                        f"Incorrect data in the KITTI 3D pointcloud file: {src}. "
         
     | 
| 
      
 359 
     | 
    
         
            +
                        f"There was an error while trying to reshape the data into a 4-column matrix: {e}. "
         
     | 
| 
      
 360 
     | 
    
         
            +
                        "Please ensure that the binary file contains a multiple of 4 elements to be "
         
     | 
| 
      
 361 
     | 
    
         
            +
                        "successfully reshaped into a (N, 4) array.\n"
         
     | 
| 
      
 362 
     | 
    
         
            +
                    )
         
     | 
| 
      
 363 
     | 
    
         
            +
                points = bin[:, 0:3]
         
     | 
| 
      
 364 
     | 
    
         
            +
                intensity = bin[:, -1]
         
     | 
| 
      
 365 
     | 
    
         
            +
                intensity_fake_rgb = np.zeros((intensity.shape[0], 3))
         
     | 
| 
      
 366 
     | 
    
         
            +
                intensity_fake_rgb[:, 0] = intensity
         
     | 
| 
      
 367 
     | 
    
         
            +
                pc = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))
         
     | 
| 
      
 368 
     | 
    
         
            +
                pc.colors = o3d.utility.Vector3dVector(intensity_fake_rgb)
         
     | 
| 
      
 369 
     | 
    
         
            +
                o3d.io.write_point_cloud(dst, pc)
         
     | 
| 
      
 370 
     | 
    
         
            +
             
     | 
| 
      
 371 
     | 
    
         
            +
             
     | 
| 
      
 372 
     | 
    
         
            +
            def convert_calib_to_image_meta(image_name, static, cam_num):
         
     | 
| 
      
 373 
     | 
    
         
            +
                intrinsic_matrix = static.get_intrinsics_matrix(cam_num)
         
     | 
| 
      
 374 
     | 
    
         
            +
                extrinsic_matrix = static.get_extrinsic_matrix()
         
     | 
| 
      
 375 
     | 
    
         
            +
             
     | 
| 
      
 376 
     | 
    
         
            +
                data = {
         
     | 
| 
      
 377 
     | 
    
         
            +
                    "name": image_name,
         
     | 
| 
      
 378 
     | 
    
         
            +
                    "meta": {
         
     | 
| 
      
 379 
     | 
    
         
            +
                        "deviceId": cam_num,
         
     | 
| 
      
 380 
     | 
    
         
            +
                        "sensorsData": {
         
     | 
| 
      
 381 
     | 
    
         
            +
                            "extrinsicMatrix": list(extrinsic_matrix.flatten().astype(float)),
         
     | 
| 
      
 382 
     | 
    
         
            +
                            "intrinsicMatrix": list(intrinsic_matrix.flatten().astype(float)),
         
     | 
| 
      
 383 
     | 
    
         
            +
                        },
         
     | 
| 
      
 384 
     | 
    
         
            +
                    },
         
     | 
| 
      
 385 
     | 
    
         
            +
                }
         
     | 
| 
      
 386 
     | 
    
         
            +
                return data
         
     | 
| 
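The helpers above are enough to go end-to-end from a raw KITTI-360 drive to Supervisely cuboids. A minimal sketch of how they could be wired together is shown below; the folder layout, file names and the import path are placeholders for illustration, and the optional kitti360scripts, transforms3d and open3d dependencies are assumed to be installed:

    # Hypothetical wiring of the new KITTI-360 helpers (all paths are placeholders).
    from supervisely.convert.pointcloud_episodes.kitti_360 import kitti_360_helper as k360

    static = k360.StaticTransformations("KITTI-360/calibration")
    static.set_cam2world("KITTI-360/data_poses/2013_05_28_drive_0000_sync/cam0_to_world.txt")

    boxes = k360.Annotation3D("KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")

    frame_index = 0
    cuboids = []
    for obj in boxes.get_objects():
        # object -> world -> camera -> velodyne, then decomposed into position/rotation/size
        tr = static.world_to_velo_transformation(obj, frame_index)
        cuboids.append(k360.convert_kitti_cuboid_to_supervisely_geometry(tr))

    # convert the matching LiDAR sweep so the cuboids can be overlaid on it
    k360.convert_bin_to_pcd(
        "KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin",
        "/tmp/0000000000.pcd",
    )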
         
File without changes
@@ -0,0 +1,151 @@
   1 + import os
   2 + from pathlib import Path
   3 + 
   4 + import magic
   5 + 
   6 + from supervisely import ProjectMeta, generate_free_name, logger
   7 + from supervisely._utils import batched, is_development
   8 + from supervisely.annotation.obj_class import ObjClass
   9 + from supervisely.annotation.obj_class_collection import ObjClassCollection
  10 + from supervisely.api.api import Api
  11 + from supervisely.convert.base_converter import AvailableVolumeConverters
  12 + from supervisely.convert.volume.nii import nii_volume_helper as helper
  13 + from supervisely.convert.volume.volume_converter import VolumeConverter
  14 + from supervisely.geometry.mask_3d import Mask3D
  15 + from supervisely.io.fs import (
  16 +     get_file_ext,
  17 +     get_file_name,
  18 +     get_file_name_with_ext,
  19 +     list_files,
  20 + )
  21 + from supervisely.volume.volume import is_nifti_file
  22 + from supervisely.volume_annotation.volume_annotation import VolumeAnnotation
  23 + from supervisely.volume_annotation.volume_object import VolumeObject
  24 + 
  25 + 
  26 + class NiiConverter(VolumeConverter):
  27 + 
  28 +     def __str__(self) -> str:
  29 +         return AvailableVolumeConverters.NII
  30 + 
  31 +     def validate_format(self) -> bool:
  32 +         # create Items
  33 +         converted_dir_name = "converted"
  34 +         # nrrds_dict = {}
  35 +         nifti_dict = {}
  36 +         nifti_dirs = {}
  37 +         for root, _, files in os.walk(self._input_data):
  38 +             dir_name = os.path.basename(root)
  39 +             nifti_dirs[dir_name] = root
  40 +             if converted_dir_name in root:
  41 +                 continue
  42 +             for file in files:
  43 +                 path = os.path.join(root, file)
  44 +                 mime = magic.from_file(path, mime=True)
  45 +                 if mime == "application/gzip" or mime == "application/octet-stream":
  46 +                     if is_nifti_file(path):  # is nifti
  47 +                         name = get_file_name(path)
  48 +                         if name.endswith(".nii"):
  49 +                             name = get_file_name(name)
  50 +                         nifti_dict[name] = path
  51 + 
  52 +         self._items = []
  53 +         skip_files = []
  54 +         for name, nrrd_path in nifti_dict.items():
  55 +             if name in nifti_dirs:
  56 +                 item = self.Item(item_path=nrrd_path)
  57 +                 ann_dir = nifti_dirs[name]
  58 +                 item.ann_data = list_files(ann_dir, [".nii", ".nii.gz", ".gz"], None, True)
  59 +                 self._items.append(item)
  60 +                 skip_files.extend(item.ann_data)
  61 +                 skip_files.append(nrrd_path)
  62 + 
  63 +         for name, nrrd_path in nifti_dict.items():
  64 +             if nrrd_path in skip_files:
  65 +                 continue
  66 +             item = self.Item(item_path=nrrd_path)
  67 +             self._items.append(item)
  68 + 
  69 +         self._meta = ProjectMeta()
  70 +         return self.items_count > 0
  71 + 
  72 +     def upload_dataset(
  73 +         self,
  74 +         api: Api,
  75 +         dataset_id: int,
  76 +         batch_size: int = 1,
  77 +         log_progress=True,
  78 +     ):
  79 +         """Upload converted data to Supervisely"""
  80 + 
  81 +         meta, renamed_classes, renamed_tags = self.merge_metas_with_conflicts(api, dataset_id)
  82 + 
  83 +         existing_names = set([vol.name for vol in api.volume.get_list(dataset_id)])
  84 + 
  85 +         if log_progress:
  86 +             progress, progress_cb = self.get_progress(
  87 +                 self.items_count, "Converting and uploading volumes..."
  88 +             )
  89 +         else:
  90 +             progress_cb = None
  91 + 
  92 +         converted_dir_name = "converted"
  93 +         converted_dir = os.path.join(self._input_data, converted_dir_name)
  94 +         meta_changed = False
  95 + 
  96 +         for batch in batched(self._items, batch_size=batch_size):
  97 +             item_names = []
  98 +             item_paths = []
  99 + 
 100 +             for item in batch:
 101 +                 # nii_path = item.path
 102 +                 item.path = helper.nifti_to_nrrd(item.path, converted_dir)
 103 +                 ext = get_file_ext(item.path)
 104 +                 if ext.lower() != ext:
 105 +                     new_volume_path = Path(item.path).with_suffix(ext.lower()).as_posix()
 106 +                     os.rename(item.path, new_volume_path)
 107 +                     item.path = new_volume_path
 108 +                 item.name = get_file_name_with_ext(item.path)
 109 +                 item.name = generate_free_name(
 110 +                     existing_names, item.name, with_ext=True, extend_used_names=True
 111 +                 )
 112 +                 item_names.append(item.name)
 113 +                 item_paths.append(item.path)
 114 + 
 115 +                 volume_info = api.volume.upload_nrrd_serie_path(
 116 +                     dataset_id, name=item.name, path=item.path
 117 +                 )
 118 + 
 119 +                 if isinstance(item.ann_data, list) and len(item.ann_data) > 0:
 120 +                     objs = []
 121 +                     spatial_figures = []
 122 +                     for ann_path in item.ann_data:
 123 +                         ann_name = get_file_name(ann_path)
 124 +                         if ann_name.endswith(".nii"):
 125 +                             ann_name = get_file_name(ann_name)
 126 +                         for mask, _ in helper.get_annotation_from_nii(ann_path):
 127 +                             obj_class = meta.get_obj_class(ann_name)
 128 +                             if obj_class is None:
 129 +                                 obj_class = ObjClass(ann_name, Mask3D)
 130 +                                 meta = meta.add_obj_class(obj_class)
 131 +                                 meta_changed = True
 132 +                             obj = VolumeObject(obj_class, mask_3d=mask)
 133 +                             spatial_figures.append(obj.figure)
 134 +                             objs.append(obj)
 135 +                     ann = VolumeAnnotation(
 136 +                         volume_info.meta, objects=objs, spatial_figures=spatial_figures
 137 +                     )
 138 + 
 139 +                     if meta_changed:
 140 +                         self._meta = meta
 141 +                         _, _, _ = self.merge_metas_with_conflicts(api, dataset_id)
 142 + 
 143 +                     api.volume.annotation.append(volume_info.id, ann)
 144 + 
 145 +             if log_progress:
 146 +                 progress_cb(len(batch))
 147 + 
 148 +         if log_progress:
 149 +             if is_development():
 150 +                 progress.close()
 151 +         logger.info(f"Dataset ID:{dataset_id} has been successfully uploaded.")
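The pairing rule in `validate_format` above is purely name based: a volume `<name>.nii` / `<name>.nii.gz` is matched with a sibling directory literally called `<name>`, and every NIfTI file found in that directory is treated as a semantic mask whose file name becomes the object class. A minimal sketch of an input tree that would be picked up, plus the per-item conversion step the converter runs (all names below are illustrative only, not a required layout beyond the matching rule):

    # Assumed input layout (placeholder names):
    #
    #   input/
    #     lungs.nii.gz        -> uploaded as the volume "lungs.nrrd"
    #     lungs/              -> directory name matches the volume name
    #       lesion.nii.gz     -> ObjClass "lesion" with Mask3D geometry
    #       airways.nii       -> ObjClass "airways" with Mask3D geometry
    #
    # Each volume is first converted exactly as in upload_dataset:
    from supervisely.convert.volume.nii import nii_volume_helper as helper

    nrrd_path = helper.nifti_to_nrrd("input/lungs.nii.gz", "input/converted")
    print(nrrd_path)  # input/converted/lungs.nrrd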
@@ -0,0 +1,38 @@
   1 + import os
   2 + from typing import Generator
   3 + 
   4 + import nrrd
   5 + import numpy as np
   6 + 
   7 + from supervisely.geometry.mask_3d import Mask3D
   8 + from supervisely.io.fs import ensure_base_path, get_file_ext, get_file_name
   9 + from supervisely.volume.volume import convert_3d_nifti_to_nrrd
  10 + 
  11 + 
  12 + def nifti_to_nrrd(nii_file_path: str, converted_dir: str) -> str:
  13 +     """Convert NIfTI 3D volume file to NRRD 3D volume file."""
  14 + 
  15 +     output_name = get_file_name(nii_file_path)
  16 +     if get_file_ext(output_name) == ".nii":
  17 +         output_name = get_file_name(output_name)
  18 + 
  19 +     data, header = convert_3d_nifti_to_nrrd(nii_file_path)
  20 + 
  21 +     nrrd_file_path = os.path.join(converted_dir, f"{output_name}.nrrd")
  22 +     ensure_base_path(nrrd_file_path)
  23 + 
  24 +     nrrd.write(nrrd_file_path, data, header)
  25 +     return nrrd_file_path
  26 + 
  27 + 
  28 + def get_annotation_from_nii(path: str) -> Generator[Mask3D, None, None]:
  29 +     """Get annotation from NIfTI 3D volume file."""
  30 + 
  31 +     data, _ = convert_3d_nifti_to_nrrd(path)
  32 +     unique_classes = np.unique(data)
  33 + 
  34 +     for class_id in unique_classes:
  35 +         if class_id == 0:
  36 +             continue
  37 +         mask = Mask3D(data == class_id)
  38 +         yield mask, class_id
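`get_annotation_from_nii` treats the label file as an integer volume: voxel value 0 is background, and every other unique value yields one `Mask3D` together with its class id. A short sketch, assuming a label file whose voxels hold the values 0, 1 and 2 (the path is a placeholder):

    from supervisely.convert.volume.nii.nii_volume_helper import get_annotation_from_nii

    for mask, class_id in get_annotation_from_nii("input/lungs/lesion.nii.gz"):
        # prints e.g. "class id 1 -> Mask3D", "class id 2 -> Mask3D"
        print(f"class id {int(class_id)} -> {type(mask).__name__}")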
@@ -1347,6 +1347,7 @@ class Inference:
 1347 1347                 source=images_np,
 1348 1348                 settings=settings,
 1349 1349             )
      1350 +           anns = self._exclude_duplicated_predictions(api, anns, settings, dataset_id, ids)
 1350 1351             results.extend(self._format_output(anns, slides_data))
 1351 1352         return results
 1352 1353 
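This hunk, and the ones that follow, route every inference path through the new `_exclude_duplicated_predictions` wrapper defined at the end of this diff. Conceptually the filter keeps only predicted labels whose bounding box does not overlap any already-annotated object at or above the configured IoU threshold. A standalone sketch of that idea, assuming the usual Annotation/Rectangle accessors; this is not the SDK's actual `_filter_duplicated_predictions_from_ann` implementation:

    import torch
    from torchvision.ops import box_iou
    from supervisely.annotation.annotation import Annotation

    def keep_new_labels(gt_ann: Annotation, pred_ann: Annotation, iou_thresh: float = 0.7) -> Annotation:
        """Drop predicted labels whose box IoU with any GT box reaches iou_thresh."""
        if len(gt_ann.labels) == 0 or len(pred_ann.labels) == 0:
            return pred_ann

        def to_boxes(ann: Annotation) -> torch.Tensor:
            # each label's geometry is reduced to its bounding rectangle [x1, y1, x2, y2]
            rects = [label.geometry.to_bbox() for label in ann.labels]
            return torch.tensor([[r.left, r.top, r.right, r.bottom] for r in rects], dtype=torch.float32)

        ious = box_iou(to_boxes(pred_ann), to_boxes(gt_ann))  # shape: (num_pred, num_gt)
        keep = (ious.max(dim=1).values < iou_thresh).tolist()
        return pred_ann.clone(labels=[lbl for lbl, k in zip(pred_ann.labels, keep) if k])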
@@ -1395,6 +1396,10 @@ class Inference:
 1395 1396                 )
 1396 1397                 self.cache.set_project_meta(output_project_id, output_project_meta)
 1397 1398 
      1399 +           ann = self._exclude_duplicated_predictions(
      1400 +               api, anns, settings, ds_info.id, [image_id], output_project_meta
      1401 +           )[0]
      1402 + 
 1398 1403             logger.debug(
 1399 1404                 "Uploading annotation...",
 1400 1405                 extra={
@@ -1404,6 +1409,10 @@ class Inference:
 1404 1409                 },
 1405 1410             )
 1406 1411             api.annotation.upload_ann(image_id, ann)
      1412 +       else:
      1413 +           ann = self._exclude_duplicated_predictions(
      1414 +               api, anns, settings, image_info.dataset_id, [image_id]
      1415 +           )[0]
 1407 1416 
 1408 1417         result = self._format_output(anns, slides_data)[0]
 1409 1418         if async_inference_request_uuid is not None and ann is not None:
@@ -1786,6 +1795,15 @@ class Inference:
 1786 1795                 batch_results = []
 1787 1796                 for i, ann in enumerate(anns):
 1788 1797                     image_info: ImageInfo = images_infos_dict[image_ids_batch[i]]
      1798 +                   ds_info = dataset_infos_dict[image_info.dataset_id]
      1799 +                   meta = output_project_metas_dict.get(ds_info.project_id, None)
      1800 +                   iou = settings.get("existing_objects_iou_thresh")
      1801 +                   if meta is None and isinstance(iou, float) and iou > 0:
      1802 +                       meta = ProjectMeta.from_json(api.project.get_meta(ds_info.project_id))
      1803 +                       output_project_metas_dict[ds_info.project_id] = meta
      1804 +                   ann = self._exclude_duplicated_predictions(
      1805 +                       api, [ann], settings, ds_info.id, [image_info.id], meta
      1806 +                   )[0]
 1789 1807                     batch_results.append(
 1790 1808                         {
 1791 1809                             "annotation": ann.to_json(),
@@ -2086,6 +2104,19 @@ class Inference:
 2086 2104                     source=images_nps,
 2087 2105                     settings=settings,
 2088 2106                 )
      2107 +               iou = settings.get("existing_objects_iou_thresh")
      2108 +               if output_project_meta is None and isinstance(iou, float) and iou > 0:
      2109 +                   output_project_meta = ProjectMeta.from_json(
      2110 +                       api.project.get_meta(project_info.id)
      2111 +                   )
      2112 +               anns = self._exclude_duplicated_predictions(
      2113 +                   api,
      2114 +                   anns,
      2115 +                   settings,
      2116 +                   dataset_info.id,
      2117 +                   [ii.id for ii in images_infos_batch],
      2118 +                   output_project_meta,
      2119 +               )
 2089 2120                 batch_results = []
 2090 2121                 for i, ann in enumerate(anns):
 2091 2122                     batch_results.append(
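All of these call sites read the same inference setting, `existing_objects_iou_thresh`: when it is a float in (0, 1], predictions that overlap an already-annotated object at or above that IoU are dropped before results are returned or uploaded. A hedged client-side sketch of switching the behaviour on, assuming the usual `Session` helper and placeholder IDs:

    import supervisely as sly

    api = sly.Api.from_env()
    session = sly.nn.inference.Session(api, task_id=12345)  # task_id is a placeholder
    session.update_inference_settings(existing_objects_iou_thresh=0.7)

    # objects already present in the image's annotation are not predicted again
    prediction = session.inference_image_id(67890)  # image_id is a placeholder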
@@ -2935,7 +2966,9 @@ class Inference:
 2935 2966         parser = argparse.ArgumentParser(description="Run Inference Serving")
 2936 2967 
 2937 2968         # Positional args
 2938      -       parser.add_argument(
      2969 +       parser.add_argument(
      2970 +           "mode", nargs="?", type=str, help="Mode of operation: 'deploy' or 'predict'"
      2971 +       )
 2939 2972         parser.add_argument("input", nargs="?", type=str, help="Local path to input data")
 2940 2973 
 2941 2974         # Deploy args
@@ -3459,6 +3492,127 @@ class Inference:
 3459 3492                 f"Checkpoint {checkpoint_url} not found in Team Files. Cannot set workflow input"
 3460 3493             )
 3461 3494 
      3495 +     def _exclude_duplicated_predictions(
      3496 +         self,
      3497 +         api: Api,
      3498 +         pred_anns: List[Annotation],
      3499 +         settings: dict,
      3500 +         dataset_id: int,
      3501 +         gt_image_ids: List[int],
      3502 +         meta: Optional[ProjectMeta] = None,
      3503 +     ):
      3504 +         """
      3505 +         Filter out predictions that significantly overlap with ground truth (GT) objects.
      3506 + 
      3507 +         This is a wrapper around the `_filter_duplicated_predictions_from_ann` method that does the following:
      3508 +         - Checks inference settings for the IoU threshold (`existing_objects_iou_thresh`)
      3509 +         - Gets ProjectMeta object if not provided
      3510 +         - Downloads GT annotations for the specified image IDs
      3511 +         - Filters out predictions that have an IoU greater than or equal to the specified threshold with any GT object
      3512 + 
      3513 +         :param api: Supervisely API object
      3514 +         :type api: Api
      3515 +         :param pred_anns: List of Annotation objects containing predictions
      3516 +         :type pred_anns: List[Annotation]
      3517 +         :param settings: Inference settings
      3518 +         :type settings: dict
      3519 +         :param dataset_id: ID of the dataset containing the images
      3520 +         :type dataset_id: int
      3521 +         :param gt_image_ids: List of image IDs to filter predictions. All images should belong to the same dataset
      3522 +         :type gt_image_ids: List[int]
      3523 +         :param meta: ProjectMeta object
      3524 +         :type meta: Optional[ProjectMeta]
      3525 +         :return: List of Annotation objects containing filtered predictions
      3526 +         :rtype: List[Annotation]
      3527 + 
      3528 +         Notes:
      3529 +         ------
      3530 +         - Requires PyTorch and torchvision for IoU calculations
      3531 +         - This method is useful for identifying new objects that aren't already annotated in the ground truth
      3532 +         """
      3533 +         iou = settings.get("existing_objects_iou_thresh")
      3534 +         if isinstance(iou, float) and 0 < iou <= 1:
      3535 +             if meta is None:
      3536 +                 ds = api.dataset.get_info_by_id(dataset_id)
      3537 +                 meta = ProjectMeta.from_json(api.project.get_meta(ds.project_id))
      3538 +             gt_anns = api.annotation.download_json_batch(dataset_id, gt_image_ids)
      3539 +             gt_anns = [Annotation.from_json(ann, meta) for ann in gt_anns]
      3540 +             for i in range(0, len(pred_anns)):
      3541 +                 before = len(pred_anns[i].labels)
      3542 +                 with Timer() as timer:
      3543 +                     pred_anns[i] = self._filter_duplicated_predictions_from_ann(
      3544 +                         gt_anns[i], pred_anns[i], iou
      3545 +                     )
      3546 +                 after = len(pred_anns[i].labels)
      3547 +                 logger.debug(
      3548 +                     f"{[i]}: applied NMS with IoU={iou}. Before: {before}, After: {after}. Time: {timer.get_time():.3f}ms"
         
     | 
| 
      
 3549 
     | 
    
         
            +
                            )
         
     | 
| 
      
 3550 
     | 
    
         
            +
                    return pred_anns
         
     | 
| 
      
 3551 
     | 
    
         
            +
             
     | 
| 
      
 3552 
     | 
    
         
            +
                def _filter_duplicated_predictions_from_ann(
         
     | 
| 
      
 3553 
     | 
    
         
            +
                    self, gt_ann: Annotation, pred_ann: Annotation, iou_threshold: float
         
     | 
| 
      
 3554 
     | 
    
         
            +
                ) -> Annotation:
         
     | 
| 
      
 3555 
     | 
    
         
            +
                    """
         
     | 
| 
      
 3556 
     | 
    
         
            +
                    Filter out predictions that significantly overlap with ground truth annotations.
         
     | 
| 
      
 3557 
     | 
    
         
            +
             
     | 
| 
      
 3558 
     | 
    
         
            +
                    This function compares each prediction with ground truth annotations of the same class
         
     | 
| 
      
 3559 
     | 
    
         
            +
                    and removes predictions that have an IoU (Intersection over Union) greater than or equal
         
     | 
| 
      
 3560 
     | 
    
         
            +
                    to the specified threshold with any ground truth annotation. This is useful for identifying
         
     | 
| 
      
 3561 
     | 
    
         
            +
                    new objects that aren't already annotated in the ground truth.
         
     | 
| 
      
 3562 
     | 
    
         
            +
             
     | 
| 
      
 3563 
     | 
    
         
            +
                    :param gt_ann: Annotation object containing ground truth labels
         
     | 
| 
      
 3564 
     | 
    
         
            +
                    :type gt_ann: Annotation
         
     | 
| 
      
 3565 
     | 
    
         
            +
                    :param pred_ann: Annotation object containing prediction labels to be filtered
         
     | 
| 
      
 3566 
     | 
    
         
            +
                    :type pred_ann: Annotation
         
     | 
| 
      
 3567 
     | 
    
         
            +
                    :param iou_threshold:   IoU threshold (0.0-1.0). Predictions with IoU >= threshold with any
         
     | 
| 
      
 3568 
     | 
    
         
            +
                                            ground truth box of the same class will be removed
         
     | 
| 
      
 3569 
     | 
    
         
            +
                    :type iou_threshold: float
         
     | 
| 
      
 3570 
     | 
    
         
            +
                    :return: A new annotation object containing only predictions that don't significantly
         
     | 
| 
      
 3571 
     | 
    
         
            +
                             overlap with ground truth annotations
         
     | 
| 
      
 3572 
     | 
    
         
            +
                    :rtype: Annotation
         
     | 
| 
      
 3573 
     | 
    
         
            +
             
     | 
| 
      
 3574 
     | 
    
         
            +
             
     | 
| 
      
 3575 
     | 
    
         
            +
                    Notes:
         
     | 
| 
      
 3576 
     | 
    
         
            +
                    ------
         
     | 
| 
      
 3577 
     | 
    
         
            +
                    - Predictions with classes not present in ground truth will be kept
         
     | 
| 
      
 3578 
     | 
    
         
            +
                    - Requires PyTorch and torchvision for IoU calculations
         
     | 
| 
      
 3579 
     | 
    
         
            +
                    """
         
     | 
| 
      
 3580 
     | 
    
         
            +
             
     | 
| 
      
 3581 
     | 
    
         
            +
                    try:
         
     | 
| 
      
 3582 
     | 
    
         
            +
                        import torch
         
     | 
| 
      
 3583 
     | 
    
         
            +
                        from torchvision.ops import box_iou
         
     | 
| 
      
 3584 
     | 
    
         
            +
             
     | 
| 
      
 3585 
     | 
    
         
            +
                    except ImportError:
         
     | 
| 
      
 3586 
     | 
    
         
            +
                        raise ImportError("Please install PyTorch and torchvision to use this feature.")
         
     | 
| 
      
 3587 
     | 
    
         
            +
             
     | 
| 
      
 3588 
     | 
    
         
            +
                    def _to_tensor(geom):
         
     | 
| 
      
 3589 
     | 
    
         
            +
                        return torch.tensor([geom.left, geom.top, geom.right, geom.bottom]).float()
         
     | 
| 
      
 3590 
     | 
    
         
            +
             
     | 
| 
      
 3591 
     | 
    
         
            +
                    new_labels = []
         
     | 
| 
      
 3592 
     | 
    
         
            +
                    pred_cls_bboxes = defaultdict(list)
         
     | 
| 
      
 3593 
     | 
    
         
            +
                    for label in pred_ann.labels:
         
     | 
| 
      
 3594 
     | 
    
         
            +
                        pred_cls_bboxes[label.obj_class.name].append(label)
         
     | 
| 
      
 3595 
     | 
    
         
            +
             
     | 
| 
      
 3596 
     | 
    
         
            +
                    gt_cls_bboxes = defaultdict(list)
         
     | 
| 
      
 3597 
     | 
    
         
            +
                    for label in gt_ann.labels:
         
     | 
| 
      
 3598 
     | 
    
         
            +
                        if label.obj_class.name not in pred_cls_bboxes:
         
     | 
| 
      
 3599 
     | 
    
         
            +
                            continue
         
     | 
| 
      
 3600 
     | 
    
         
            +
                        gt_cls_bboxes[label.obj_class.name].append(label)
         
     | 
| 
      
 3601 
     | 
    
         
            +
             
     | 
| 
      
 3602 
     | 
    
         
            +
                    for name, pred in pred_cls_bboxes.items():
         
     | 
| 
      
 3603 
     | 
    
         
            +
                        gt = gt_cls_bboxes[name]
         
     | 
| 
      
 3604 
     | 
    
         
            +
                        if len(gt) == 0:
         
     | 
| 
      
 3605 
     | 
    
         
            +
                            new_labels.extend(pred)
         
     | 
| 
      
 3606 
     | 
    
         
            +
                            continue
         
     | 
| 
      
 3607 
     | 
    
         
            +
                        pred_bboxes = torch.stack([_to_tensor(l.geometry.to_bbox()) for l in pred]).float()
         
     | 
| 
      
 3608 
     | 
    
         
            +
                        gt_bboxes = torch.stack([_to_tensor(l.geometry.to_bbox()) for l in gt]).float()
         
     | 
| 
      
 3609 
     | 
    
         
            +
                        iou_matrix = box_iou(pred_bboxes, gt_bboxes)
         
     | 
| 
      
 3610 
     | 
    
         
            +
                        iou_matrix = iou_matrix.cpu().numpy()
         
     | 
| 
      
 3611 
     | 
    
         
            +
                        keep_indices = np.where(np.all(iou_matrix < iou_threshold, axis=1))[0]
         
     | 
| 
      
 3612 
     | 
    
         
            +
                        new_labels.extend([pred[i] for i in keep_indices])
         
     | 
| 
      
 3613 
     | 
    
         
            +
             
     | 
| 
      
 3614 
     | 
    
         
            +
                    return pred_ann.clone(labels=new_labels)
         
     | 
| 
      
 3615 
     | 
    
         
            +
             
     | 
| 
       3462 
3616 
     | 
    
         | 
| 
       3463 
3617 
     | 
    
         
             
            def _get_log_extra_for_inference_request(inference_request_uuid, inference_request: dict):
         
     | 
| 
       3464 
3618 
     | 
    
         
             
                log_extra = {
         
     | 
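The two methods added above implement a "keep only new objects" filter: predictions are grouped by class name, every predicted box is compared against the ground-truth boxes of the same class with `torchvision.ops.box_iou`, and a prediction is dropped as soon as its IoU with any existing object reaches the `existing_objects_iou_thresh` value from the inference settings. A minimal standalone sketch of that check (the boxes and the threshold below are made-up illustration values, not taken from the package):

    import torch
    from torchvision.ops import box_iou

    # Hypothetical boxes in [left, top, right, bottom] format, all of one class.
    gt_boxes = torch.tensor([[10.0, 10.0, 50.0, 50.0]])  # already-annotated object
    pred_boxes = torch.tensor(
        [
            [12.0, 11.0, 49.0, 52.0],      # near-duplicate of the GT box (IoU ~0.86)
            [100.0, 100.0, 150.0, 160.0],  # genuinely new object (IoU 0.0)
        ]
    )

    iou_threshold = 0.8  # plays the role of settings["existing_objects_iou_thresh"]
    iou_matrix = box_iou(pred_boxes, gt_boxes)      # shape: (num_pred, num_gt)
    keep = (iou_matrix < iou_threshold).all(dim=1)  # keep only if below threshold vs. every GT box
    print(keep.tolist())                            # [False, True] -> duplicate dropped, new box kept

In the method itself the surviving Label objects are returned through `pred_ann.clone(labels=new_labels)`, so the original prediction annotation is never mutated in place.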
    
supervisely/volume/volume.py CHANGED

@@ -799,6 +799,49 @@ def convert_nifti_to_nrrd(path: str) -> Tuple[np.ndarray, dict]:
  799   799 |      }
  800   800 |      return data, header
  801   801 | 
        802 | +def convert_3d_nifti_to_nrrd(path: str) -> Tuple[np.ndarray, dict]:
        803 | +    """Convert 3D NIFTI volume to NRRD format.
        804 | +    Volume automatically reordered to RAS orientation as closest to canonical.
        805 | +
        806 | +    :param path: Path to NIFTI volume file.
        807 | +    :type path: str
        808 | +    :return: Volume data in NumPy array format and dictionary with metadata (NRRD header).
        809 | +    :rtype: Tuple[np.ndarray, dict]
        810 | +    :Usage example:
        811 | +
        812 | +     .. code-block:: python
        813 | +
        814 | +        import supervisely as sly
        815 | +
        816 | +        path = "/home/admin/work/volumes/vol_01.nii"
        817 | +        data, header = sly.volume.convert_nifti_to_nrrd(path)
        818 | +    """
        819 | +
        820 | +    import nibabel as nib  # pylint: disable=import-error
        821 | +
        822 | +    orientation_map = {
        823 | +        ('R', 'A', 'S'): "right-anterior-superior",
        824 | +        ('L', 'P', 'S'): "left-posterior-superior",
        825 | +        ('R', 'P', 'I'): "right-posterior-inferior",
        826 | +        ('L', 'A', 'I'): "left-anterior-inferior"
        827 | +    }
        828 | +    nifti = nib.load(path)
        829 | +    reordered_to_ras_nifti = nib.as_closest_canonical(nifti)
        830 | +    data = reordered_to_ras_nifti.get_fdata()
        831 | +    affine = reordered_to_ras_nifti.affine
        832 | +    orientation = nib.aff2axcodes(affine)
        833 | +    space_directions = affine[:3, :3].tolist()
        834 | +    space_origin = affine[:3, 3].tolist()
        835 | +    header = {
        836 | +        "space": orientation_map.get(orientation, "unknown"),
        837 | +        "space directions": space_directions,
        838 | +        "space origin": space_origin,
        839 | +        "sizes": data.shape,
        840 | +        "type": str(data.dtype),
        841 | +        "dimension": len(data.shape),
        842 | +    }
        843 | +    return data, header
        844 | +
  802   845 | 
  803   846 |  def is_nifti_file(path: str) -> bool:
  804   847 |      """Check if the file is a NIFTI file.
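`convert_3d_nifti_to_nrrd` mirrors the existing `convert_nifti_to_nrrd` helper, but it first reorients the volume to the closest canonical (RAS) axes with `nib.as_closest_canonical` and then derives the NRRD `space`, `space directions`, and `space origin` fields from the resulting affine. A short sketch of how the returned pair could be written to disk with pynrrd; the file paths are placeholders, and it assumes the new function is exposed from `sly.volume` the same way `convert_nifti_to_nrrd` is:

    import nrrd  # pynrrd
    import supervisely as sly

    nifti_path = "/data/volumes/chest_ct.nii"   # placeholder input path
    nrrd_path = "/data/volumes/chest_ct.nrrd"   # placeholder output path

    data, header = sly.volume.convert_3d_nifti_to_nrrd(nifti_path)

    # The spatial fields are enough for pynrrd to preserve the RAS orientation;
    # dtype and shape are taken from the array itself.
    spatial_header = {key: header[key] for key in ("space", "space directions", "space origin")}
    nrrd.write(nrrd_path, data, spatial_header)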
{supervisely-6.73.321.dist-info → supervisely-6.73.323.dist-info}/RECORD CHANGED

@@ -5,7 +5,7 @@ supervisely/function_wrapper.py,sha256=R5YajTQ0GnRp2vtjwfC9hINkzQc0JiyGsu8TER373
    5     5 |  supervisely/sly_logger.py,sha256=z92Vu5hmC0GgTIJO1n6kPDayRW9__8ix8hL6poDZj-Y,6274
    6     6 |  supervisely/tiny_timer.py,sha256=hkpe_7FE6bsKL79blSs7WBaktuPavEVu67IpEPrfmjE,183
    7     7 |  supervisely/annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
    8       | -supervisely/annotation/annotation.py,sha256=
          8 | +supervisely/annotation/annotation.py,sha256=x1RizD9DPiwk14Mf8xGvuwPdzx_zI5Zx1CVvmCy_sII,114665
    9     9 |  supervisely/annotation/annotation_transforms.py,sha256=TlVy_gUbM-XH6GbLpZPrAi6pMIGTr7Ow02iSKOSTa-I,9582
   10    10 |  supervisely/annotation/json_geometries_map.py,sha256=nL6AmMhFy02fw9ryBm75plKyOkDh61QdOToSuLAcz_Q,1659
   11    11 |  supervisely/annotation/label.py,sha256=NpHZ5o2H6dI4KiII22o2HpiLXG1yekh-bEy8WvI2Ljg,37498

@@ -565,7 +565,7 @@ supervisely/collection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
  565   565 |  supervisely/collection/key_indexed_collection.py,sha256=x2UVlkprspWhhae9oLUzjTWBoIouiWY9UQSS_MozfH0,37643
  566   566 |  supervisely/collection/str_enum.py,sha256=Zp29yFGvnxC6oJRYNNlXhO2lTSdsriU1wiGHj6ahEJE,1250
  567   567 |  supervisely/convert/__init__.py,sha256=ropgB1eebG2bfLoJyf2jp8Vv9UkFujaW3jVX-71ho1g,1353
  568       | -supervisely/convert/base_converter.py,sha256=
        568 | +supervisely/convert/base_converter.py,sha256=O2SP4I_Hd0aSn8kbOUocy8orkc_-iD-TQ-z4ieUqabA,18579
  569   569 |  supervisely/convert/converter.py,sha256=tWxTDfFv7hwzQhUQrBxzfr6WP8FUGFX_ewg5T2HbUYo,8959
  570   570 |  supervisely/convert/image/__init__.py,sha256=JEuyaBiiyiYmEUYqdn8Mog5FVXpz0H1zFubKkOOm73I,1395
  571   571 |  supervisely/convert/image/image_converter.py,sha256=8vak8ZoKTN1ye2ZmCTvCZ605-Rw1AFLIEo7bJMfnR68,10426

@@ -634,10 +634,13 @@ supervisely/convert/pointcloud/ply/ply_helper.py,sha256=YfLiV9m6a4NNEMs0J32dmMTL
  634   634 |  supervisely/convert/pointcloud/sly/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  635   635 |  supervisely/convert/pointcloud/sly/sly_pointcloud_converter.py,sha256=r56Rwil-55cRnd0sIePFGrf_xXa-lKQSfwhEUrjOquk,5070
  636   636 |  supervisely/convert/pointcloud/sly/sly_pointcloud_helper.py,sha256=kOluL97FfCFfIvnUE_FeN8iQLMlwdiMR5gayorOGDXw,3968
  637       | -supervisely/convert/pointcloud_episodes/__init__.py,sha256=
        637 | +supervisely/convert/pointcloud_episodes/__init__.py,sha256=LePLQFEjXwhXap2zOY9SVTbW_NMbxKYZKBjBdRLimKE,557
  638   638 |  supervisely/convert/pointcloud_episodes/pointcloud_episodes_converter.py,sha256=qULUzO96BvWgNVmyxSQ0pUPBPG3WHgUJuK_U7Z8NM-g,9428
  639   639 |  supervisely/convert/pointcloud_episodes/bag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  640   640 |  supervisely/convert/pointcloud_episodes/bag/bag_converter.py,sha256=jzWKXoFUWu11d5WlPfT1hphCubYpq_lhQZmhh07xZdQ,1659
        641 | +supervisely/convert/pointcloud_episodes/kitti_360/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
        642 | +supervisely/convert/pointcloud_episodes/kitti_360/kitti_360_converter.py,sha256=ls3Pgf9WYTtaTzf6nLCL3gMjG6zZ_EAVKE5OJSFAOPc,10033
        643 | +supervisely/convert/pointcloud_episodes/kitti_360/kitti_360_helper.py,sha256=EHyJTRfIpUC3lETJOCTI_OY4ddmT0eTFLMMhOvSeCm0,12372
  641   644 |  supervisely/convert/pointcloud_episodes/lyft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  642   645 |  supervisely/convert/pointcloud_episodes/lyft/lyft_converter.py,sha256=QXreWUJ-QhoWgLPqRxCayatYCCCuSV6Z2XCZKScrD3o,10419
  643   646 |  supervisely/convert/pointcloud_episodes/nuscenes_conv/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -655,11 +658,14 @@ supervisely/convert/video/mot/mot_converter.py,sha256=wXbv-9Psc2uVnhzHuOt5VnRIvS
  655   658 |  supervisely/convert/video/sly/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  656   659 |  supervisely/convert/video/sly/sly_video_converter.py,sha256=S2qif7JFxqIi9VN_ez_iBtoJXpG9W6Ky2k5Er3-DtUo,4418
  657   660 |  supervisely/convert/video/sly/sly_video_helper.py,sha256=D8PgoXpi0y3z-VEqvBLDf_gSUQ2hTL3irrfJyGhaV0Y,6758
  658       | -supervisely/convert/volume/__init__.py,sha256=
        661 | +supervisely/convert/volume/__init__.py,sha256=NjVfOa9uH1BdYvB-RynW6L28x0f_tqL9p7tHSIQ6Sso,245
  659   662 |  supervisely/convert/volume/volume_converter.py,sha256=3jpt2Yn_G4FSP_vHFsJHQfYNQpT7q6ar_sRyr_xrPnA,5335
  660   663 |  supervisely/convert/volume/dicom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  661   664 |  supervisely/convert/volume/dicom/dicom_converter.py,sha256=__QP8fMAaq_BdWFYh1_nAYT2gpY1WwZzdlDj39YwHhw,3195
  662   665 |  supervisely/convert/volume/dicom/dicom_helper.py,sha256=1EXmxl5Z8Xi3ZkZnfJ4EbiPCVyITSXUc0Cn_oo02pPE,1284
        666 | +supervisely/convert/volume/nii/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
        667 | +supervisely/convert/volume/nii/nii_volume_converter.py,sha256=kI2JmeFuLfLWgYGCEozoaka1QH4TocnfgyN0em6maa0,5946
        668 | +supervisely/convert/volume/nii/nii_volume_helper.py,sha256=kzh20fsdeI8efA0vawW0M6Wh48nMlCLzHBQFuSNVFmc,1136
  663   669 |  supervisely/convert/volume/sly/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  664   670 |  supervisely/convert/volume/sly/sly_volume_converter.py,sha256=XmSuxnRqxchG87b244f3h0UHvOt6IkajMquL1drWlCM,5595
  665   671 |  supervisely/convert/volume/sly/sly_volume_helper.py,sha256=gUY0GW3zDMlO2y-zQQG36uoXMrKkKz4-ErM1CDxFCxE,5620

@@ -876,7 +882,7 @@ supervisely/nn/benchmark/visualization/widgets/table/__init__.py,sha256=47DEQpj8
  876   882 |  supervisely/nn/benchmark/visualization/widgets/table/table.py,sha256=atmDnF1Af6qLQBUjLhK18RMDKAYlxnsuVHMSEa5a-e8,4319
  877   883 |  supervisely/nn/inference/__init__.py,sha256=QFukX2ip-U7263aEPCF_UCFwj6EujbMnsgrXp5Bbt8I,1623
  878   884 |  supervisely/nn/inference/cache.py,sha256=q4F7ZRzZghNWSVFClXEIHNMNW4PK6xddYckCFUgyhCo,32027
  879       | -supervisely/nn/inference/inference.py,sha256=
        885 | +supervisely/nn/inference/inference.py,sha256=SqfIgohv0U3USQpHerzkrnfIeC7JKGeQA49Tocliu1k,165877
  880   886 |  supervisely/nn/inference/session.py,sha256=jmkkxbe2kH-lEgUU6Afh62jP68dxfhF5v6OGDfLU62E,35757
  881   887 |  supervisely/nn/inference/video_inference.py,sha256=8Bshjr6rDyLay5Za8IB8Dr6FURMO2R_v7aELasO8pR4,5746
  882   888 |  supervisely/nn/inference/gui/__init__.py,sha256=wCxd-lF5Zhcwsis-wScDA8n1Gk_1O00PKgDviUZ3F1U,221

@@ -1053,7 +1059,7 @@ supervisely/volume/__init__.py,sha256=EBZBY_5mzabXzMUQh5akusIGd16XnX9n8J0jIi_JmW
 1053  1059 |  supervisely/volume/nrrd_encoder.py,sha256=1lqwwyqxEvctw1ysQ70x4xPSV1uy1g5YcH5CURwL7-c,4084
 1054  1060 |  supervisely/volume/nrrd_loader.py,sha256=_yqahKcqSRxunHZ5LtnUWIRA7UvIhPKOhAUwYijSGY4,9065
 1055  1061 |  supervisely/volume/stl_converter.py,sha256=WIMQgHO_u4JT58QdcMXcb_euF1BFhM7D52IVX_0QTxE,6285
 1056       | -supervisely/volume/volume.py,sha256
       1062 | +supervisely/volume/volume.py,sha256=7ebCIICqZMwRP3ruRy3PtSeiUpBpSlyRH20VJybBQbI,25828
 1057  1063 |  supervisely/volume_annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 1058  1064 |  supervisely/volume_annotation/constants.py,sha256=BdFIh56fy7vzLIjt0gH8xP01EIU-qgQIwbSHVUcABCU,569
 1059  1065 |  supervisely/volume_annotation/plane.py,sha256=wyezAcc8tLp38O44CwWY0wjdQxf3VjRdFLWooCrk-Nw,16301

@@ -1075,9 +1081,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 1075  1081 |  supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 1076  1082 |  supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 1077  1083 |  supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
 1078       | -supervisely-6.73.
 1079       | -supervisely-6.73.
 1080       | -supervisely-6.73.
 1081       | -supervisely-6.73.
 1082       | -supervisely-6.73.
 1083       | -supervisely-6.73.
       1084 | +supervisely-6.73.323.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
       1085 | +supervisely-6.73.323.dist-info/METADATA,sha256=uIqQoH6i-OiLhSZSLt6SqL7O1ZWPV0D6ZRJICli80eE,33596
       1086 | +supervisely-6.73.323.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
       1087 | +supervisely-6.73.323.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
       1088 | +supervisely-6.73.323.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
       1089 | +supervisely-6.73.323.dist-info/RECORD,,

File without changes
File without changes
File without changes
File without changes