supervisely 6.73.320-py3-none-any.whl → 6.73.322-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervisely/_utils.py +33 -0
 - supervisely/api/api.py +17 -13
 - supervisely/api/file_api.py +158 -25
 - supervisely/convert/base_converter.py +1 -0
 - supervisely/convert/pointcloud_episodes/__init__.py +1 -0
 - supervisely/convert/pointcloud_episodes/kitti_360/__init__.py +0 -0
 - supervisely/convert/pointcloud_episodes/kitti_360/kitti_360_converter.py +242 -0
 - supervisely/convert/pointcloud_episodes/kitti_360/kitti_360_helper.py +386 -0
 - supervisely/io/fs.py +81 -4
 - supervisely/nn/inference/inference.py +155 -1
 - supervisely/nn/training/train_app.py +48 -36
 - {supervisely-6.73.320.dist-info → supervisely-6.73.322.dist-info}/METADATA +1 -1
 - {supervisely-6.73.320.dist-info → supervisely-6.73.322.dist-info}/RECORD +17 -14
 - {supervisely-6.73.320.dist-info → supervisely-6.73.322.dist-info}/LICENSE +0 -0
 - {supervisely-6.73.320.dist-info → supervisely-6.73.322.dist-info}/WHEEL +0 -0
 - {supervisely-6.73.320.dist-info → supervisely-6.73.322.dist-info}/entry_points.txt +0 -0
 - {supervisely-6.73.320.dist-info → supervisely-6.73.322.dist-info}/top_level.txt +0 -0
 
supervisely/convert/pointcloud_episodes/kitti_360/kitti_360_helper.py
ADDED

@@ -0,0 +1,386 @@
+from supervisely import logger
+from supervisely.io.fs import get_file_name
+from supervisely.geometry.cuboid_3d import Cuboid3d
+from supervisely.geometry.point_3d import Vector3d
+from supervisely.geometry.point import Point
+
+from collections import defaultdict
+import os
+import numpy as np
+
+
+MAX_N = 1000
+
+
+def local2global(semanticId, instanceId):
+    globalId = semanticId * MAX_N + instanceId
+    if isinstance(globalId, np.ndarray):
+        return globalId.astype(np.int)
+    else:
+        return int(globalId)
+
+
+def global2local(globalId):
+    semanticId = globalId // MAX_N
+    instanceId = globalId % MAX_N
+    if isinstance(globalId, np.ndarray):
+        return semanticId.astype(int), instanceId.astype(int)
+    else:
+        return int(semanticId), int(instanceId)
+
+
+annotation2global = defaultdict()
+
+
+# Abstract base class for annotation objects
+class KITTI360Object:
+    from abc import ABCMeta
+
+    __metaclass__ = ABCMeta
+
+    def __init__(self):
+        from matplotlib import cm
+
+        # the label
+        self.label = ""
+
+        # colormap
+        self.cmap = cm.get_cmap("Set1")
+        self.cmap_length = 9
+
+    def getColor(self, idx):
+        if idx == 0:
+            return np.array([0, 0, 0])
+        return np.asarray(self.cmap(idx % self.cmap_length)[:3]) * 255.0
+
+    # def assignColor(self):
+    #     from kitti360scripts.helpers.labels import id2label  # pylint: disable=import-error
+
+    #     if self.semanticId >= 0:
+    #         self.semanticColor = id2label[self.semanticId].color
+    #         if self.instanceId > 0:
+    #             self.instanceColor = self.getColor(self.instanceId)
+    #         else:
+    #             self.instanceColor = self.semanticColor
+
+
+# Class that contains the information of a single annotated object as 3D bounding box
+class KITTI360Bbox3D(KITTI360Object):
+    # Constructor
+    def __init__(self):
+        KITTI360Object.__init__(self)
+        # the polygon as list of points
+        self.vertices = []
+        self.faces = []
+        self.lines = [
+            [0, 5],
+            [1, 4],
+            [2, 7],
+            [3, 6],
+            [0, 1],
+            [1, 3],
+            [3, 2],
+            [2, 0],
+            [4, 5],
+            [5, 7],
+            [7, 6],
+            [6, 4],
+        ]
+
+        # the ID of the corresponding object
+        self.semanticId = -1
+        self.instanceId = -1
+        self.annotationId = -1
+
+        # the window that contains the bbox
+        self.start_frame = -1
+        self.end_frame = -1
+
+        # timestamp of the bbox (-1 if statis)
+        self.timestamp = -1
+
+        # projected vertices
+        self.vertices_proj = None
+        self.meshes = []
+
+        # name
+        self.name = ""
+
+    def __str__(self):
+        return self.name
+
+    # def generateMeshes(self):
+    #     self.meshes = []
+    #     if self.vertices_proj:
+    #         for fidx in range(self.faces.shape[0]):
+    #             self.meshes.append(
+    #                 [
+    #                     Point(self.vertices_proj[0][int(x)], self.vertices_proj[1][int(x)])
+    #                     for x in self.faces[fidx]
+    #                 ]
+    #             )
+
+    def parseOpencvMatrix(self, node):
+        rows = int(node.find("rows").text)
+        cols = int(node.find("cols").text)
+        data = node.find("data").text.split(" ")
+
+        mat = []
+        for d in data:
+            d = d.replace("\n", "")
+            if len(d) < 1:
+                continue
+            mat.append(float(d))
+        mat = np.reshape(mat, [rows, cols])
+        return mat
+
+    def parseVertices(self, child):
+        transform = self.parseOpencvMatrix(child.find("transform"))
+        R = transform[:3, :3]
+        T = transform[:3, 3]
+        vertices = self.parseOpencvMatrix(child.find("vertices"))
+        faces = self.parseOpencvMatrix(child.find("faces"))
+
+        vertices = np.matmul(R, vertices.transpose()).transpose() + T
+        self.vertices = vertices
+        self.faces = faces
+        self.R = R
+        self.T = T
+
+        self.transform = transform
+
+    def parseBbox(self, child):
+        from kitti360scripts.helpers.labels import kittiId2label  # pylint: disable=import-error
+
+        semanticIdKITTI = int(child.find("semanticId").text)
+        self.semanticId = kittiId2label[semanticIdKITTI].id
+        self.instanceId = int(child.find("instanceId").text)
+        # self.name = str(child.find('label').text)
+        self.name = kittiId2label[semanticIdKITTI].name
+
+        self.start_frame = int(child.find("start_frame").text)
+        self.end_frame = int(child.find("end_frame").text)
+
+        self.timestamp = int(child.find("timestamp").text)
+
+        self.annotationId = int(child.find("index").text) + 1
+
+        global annotation2global
+        annotation2global[self.annotationId] = local2global(self.semanticId, self.instanceId)
+        self.parseVertices(child)
+
+    def parseStuff(self, child):
+        from kitti360scripts.helpers.labels import name2label  # pylint: disable=import-error
+
+        classmap = {
+            "driveway": "parking",
+            "ground": "terrain",
+            "unknownGround": "ground",
+            "railtrack": "rail track",
+        }
+        label = child.find("label").text
+        if label in classmap.keys():
+            label = classmap[label]
+
+        self.start_frame = int(child.find("start_frame").text)
+        self.end_frame = int(child.find("end_frame").text)
+
+        self.semanticId = name2label[label].id
+        self.instanceId = 0
+        self.parseVertices(child)
+
+
+# Class that contains the information of the point cloud a single frame
+class KITTI360Point3D(KITTI360Object):
+    # Constructor
+    def __init__(self):
+        KITTI360Object.__init__(self)
+
+        self.vertices = []
+
+        self.vertices_proj = None
+
+        # the ID of the corresponding object
+        self.semanticId = -1
+        self.instanceId = -1
+        self.annotationId = -1
+
+        # name
+        self.name = ""
+
+        # color
+        self.semanticColor = None
+        self.instanceColor = None
+
+    def __str__(self):
+        return self.name
+
+    # def generateMeshes(self):
+    #     pass
+
+
+# Meta class for KITTI360Bbox3D
+class Annotation3D:
+    def __init__(self, labelPath):
+        from kitti360scripts.helpers.labels import labels  # pylint: disable=import-error
+        import xml.etree.ElementTree as ET
+
+        key_name = get_file_name(labelPath)
+        # load annotation
+        tree = ET.parse(labelPath)
+        root = tree.getroot()
+
+        self.objects = defaultdict(dict)
+
+        self.num_bbox = 0
+        for child in root:
+            if child.find("transform") is None:
+                continue
+            obj = KITTI360Bbox3D()
+            obj.parseBbox(child)
+            globalId = local2global(obj.semanticId, obj.instanceId)
+            self.objects[globalId][obj.timestamp] = obj
+            self.num_bbox += 1
+
+        globalIds = np.asarray(list(self.objects.keys()))
+        semanticIds, instanceIds = global2local(globalIds)
+        for label in labels:
+            if label.hasInstances:
+                print(f"{label.name:<30}:\t {(semanticIds==label.id).sum()}")
+        print(f"Loaded {len(globalIds)} instances")
+        print(f"Loaded {self.num_bbox} boxes")
+
+    def __call__(self, semanticId, instanceId, timestamp=None):
+        globalId = local2global(semanticId, instanceId)
+        if globalId in self.objects.keys():
+            # static object
+            if len(self.objects[globalId].keys()) == 1:
+                if -1 in self.objects[globalId].keys():
+                    return self.objects[globalId][-1]
+                else:
+                    return None
+            # dynamic object
+            else:
+                return self.objects[globalId][timestamp]
+        else:
+            return None
+
+    def get_objects(self):
+        return [list(obj.values())[0] for obj in self.objects.values()]
+
+class StaticTransformations:
+    def __init__(self, calibrations_path):
+        import kitti360scripts.devkits.commons.loadCalibration as lc  # pylint: disable=import-error
+
+        cam2velo_path = os.path.join(calibrations_path, "calib_cam_to_velo.txt")
+        self.cam2velo = lc.loadCalibrationRigid(cam2velo_path)
+        perspective_path = os.path.join(calibrations_path, "perspective.txt")
+        self.intrinsic_calibrations = lc.loadPerspectiveIntrinsic(perspective_path)
+        self.cam2world = None
+
+    def set_cam2world(self, cam2world_path):
+        if not os.path.isfile(cam2world_path):
+            logger.warn("Camera to world calibration file was not found")
+            return
+
+        cam2world_rows = np.loadtxt(cam2world_path)
+        cam2world_rigid = np.reshape(cam2world_rows[:, 1:], (-1, 4, 4))
+        frames_numbers = list(np.reshape(cam2world_rows[:, :1], (-1)).astype(int))
+        cam2world = {}
+
+        current_rigid = cam2world_rigid[0]
+
+        for frame_index in range(0, frames_numbers[-1]):
+            if frame_index in frames_numbers:
+                mapped_index = frames_numbers.index(frame_index)
+                current_rigid = cam2world_rigid[mapped_index]
+
+            # (Tr(cam -> world))
+            cam2world[frame_index] = current_rigid
+        self.cam2world = cam2world
+
+    def world_to_velo_transformation(self, obj, frame_index):
+        # rotate_z = Rotation.from_rotvec(np.pi * np.array([0, 0, 1])).as_matrix()
+        # rotate_z = np.hstack((rotate_z, np.asarray([[0, 0, 0]]).T))
+
+        # tr0(local -> fixed_coordinates_local)
+        tr0 = np.asarray([[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
+
+        # tr0(fixed_coordinates_local -> world)
+        tr1 = obj.transform
+
+        # tr2(world -> cam)
+        tr2 = np.linalg.inv(self.cam2world[frame_index])
+
+        # tr3(world -> cam)
+        tr3 = self.cam2velo
+
+        return tr3 @ tr2 @ tr1 @ tr0
+
+    def get_extrinsic_matrix(self):
+        return np.linalg.inv(self.cam2velo)[:3, :4]
+
+    def get_intrinsics_matrix(self, camera_num):
+        try:
+            matrix = self.intrinsic_calibrations[f"P_rect_0{camera_num}"][:3, :3]
+            return matrix
+        except KeyError:
+            logger.warn(f"Camera {camera_num} intrinsic matrix was not found")
+        return
+
+def convert_kitti_cuboid_to_supervisely_geometry(tr_matrix):
+    import transforms3d  # pylint: disable=import-error
+    from scipy.spatial.transform.rotation import Rotation
+
+    Tdash, Rdash, Zdash, _ = transforms3d.affines.decompose44(tr_matrix)
+
+    x, y, z = Tdash[0], Tdash[1], Tdash[2]
+    position = Vector3d(x, y, z)
+
+    rotation_angles = Rotation.from_matrix(Rdash).as_euler("xyz", degrees=False)
+    r_x, r_y, r_z = rotation_angles[0], rotation_angles[1], rotation_angles[2]
+
+    # Invert the bbox by adding π to the yaw while maintaining its degree relative to the world
+    rotation = Vector3d(r_x, r_y, r_z + np.pi)
+
+    w, h, l = Zdash[0], Zdash[1], Zdash[2]
+    dimension = Vector3d(w, h, l)
+
+    return Cuboid3d(position, rotation, dimension)

+def convert_bin_to_pcd(src, dst):
+    import open3d as o3d  # pylint: disable=import-error
+
+    try:
+        bin = np.fromfile(src, dtype=np.float32).reshape(-1, 4)
+    except ValueError as e:
+        raise Exception(
+            f"Incorrect data in the KITTI 3D pointcloud file: {src}. "
+            f"There was an error while trying to reshape the data into a 4-column matrix: {e}. "
+            "Please ensure that the binary file contains a multiple of 4 elements to be "
+            "successfully reshaped into a (N, 4) array.\n"
+        )
+    points = bin[:, 0:3]
+    intensity = bin[:, -1]
+    intensity_fake_rgb = np.zeros((intensity.shape[0], 3))
+    intensity_fake_rgb[:, 0] = intensity
+    pc = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))
+    pc.colors = o3d.utility.Vector3dVector(intensity_fake_rgb)
+    o3d.io.write_point_cloud(dst, pc)
+
+
+def convert_calib_to_image_meta(image_name, static, cam_num):
+    intrinsic_matrix = static.get_intrinsics_matrix(cam_num)
+    extrinsic_matrix = static.get_extrinsic_matrix()
+
+    data = {
+        "name": image_name,
+        "meta": {
+            "deviceId": cam_num,
+            "sensorsData": {
+                "extrinsicMatrix": list(extrinsic_matrix.flatten().astype(float)),
+                "intrinsicMatrix": list(intrinsic_matrix.flatten().astype(float)),
+            },
+        },
+    }
+    return data
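Note on the ID scheme used above: local2global packs a KITTI-360 semantic ID and instance ID into a single integer with MAX_N = 1000, and global2local reverses it. A minimal standalone sketch of the round trip (not part of the package, values are made up):

    # globalId = semanticId * MAX_N + instanceId, assuming instanceId < MAX_N
    MAX_N = 1000

    semanticId, instanceId = 26, 42
    globalId = semanticId * MAX_N + instanceId            # 26042
    assert (globalId // MAX_N, globalId % MAX_N) == (26, 42)   # recovers the local IDs
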
supervisely/io/fs.py
CHANGED

@@ -205,15 +205,19 @@ def list_files_recursively(
             for filename in file_names:
                 yield os.path.join(dir_name, filename)
 
-    valid_extensions =
+    valid_extensions = (
+        valid_extensions
+        if ignore_valid_extensions_case is False
+        else [ext.lower() for ext in valid_extensions]
+    )
     files = []
     for file_path in file_path_generator():
         file_ext = get_file_ext(file_path)
         if ignore_valid_extensions_case:
             file_ext.lower()
-        if (
-
-        )
+        if (valid_extensions is None or file_ext in valid_extensions) and (
+            filter_fn is None or filter_fn(file_path)
+        ):
             files.append(file_path)
     return files
 
@@ -1558,3 +1562,76 @@ async def touch_async(path: str) -> None:
     async with aiofiles.open(path, "a"):
         loop = get_or_create_event_loop()
         await loop.run_in_executor(None, os.utime, path, None)
+
+
+async def list_files_recursively_async(
+    dir_path: str,
+    valid_extensions: Optional[List[str]] = None,
+    filter_fn: Optional[Callable[[str], bool]] = None,
+    ignore_valid_extensions_case: bool = False,
+) -> List[str]:
+    """
+    Recursively list files in the directory asynchronously.
+    Returns list with all file paths.
+    Can be filtered by valid extensions and filter function.
+
+    :param dir_path: Target directory path.
+    :type dir_path: str
+    :param valid_extensions: List of valid extensions. Default is None.
+    :type valid_extensions: Optional[List[str]]
+    :param filter_fn: Filter function. Default is None.
+    :type filter_fn: Optional[Callable[[str], bool]]
+    :param ignore_valid_extensions_case: Ignore case when checking valid extensions. Default is False.
+    :type ignore_valid_extensions_case: bool
+    :returns: List of file paths
+    :rtype: List[str]
+
+    :Usage example:
+
+         .. code-block:: python
+
+            import supervisely as sly
+
+            dir_path = '/home/admin/work/projects/examples'
+            loop = sly.utils.get_or_create_event_loop()
+            coro = sly.fs.list_files_recursively_async(dir_path)
+            if loop.is_running():
+                future = asyncio.run_coroutine_threadsafe(coro, loop)
+                files = future.result()
+            else:
+                files = loop.run_until_complete(coro)
+    """
+
+    def sync_file_list():
+        if valid_extensions and ignore_valid_extensions_case:
+            valid_extensions_set = set(map(str.lower, valid_extensions))
+        else:
+            valid_extensions_set = set(valid_extensions) if valid_extensions else None
+
+        files = []
+        for dir_name, _, file_names in os.walk(dir_path):
+            full_paths = [os.path.join(dir_name, filename) for filename in file_names]
+
+            if valid_extensions_set:
+                full_paths = [
+                    fp
+                    for fp in full_paths
+                    if (
+                        ext := (
+                            os.path.splitext(fp)[1].lower()
+                            if ignore_valid_extensions_case
+                            else os.path.splitext(fp)[1]
+                        )
+                    )
+                    in valid_extensions_set
+                ]
+
+            if filter_fn:
+                full_paths = [fp for fp in full_paths if filter_fn(fp)]
+
+            files.extend(full_paths)
+
+        return files
+
+    loop = get_or_create_event_loop()
+    return await loop.run_in_executor(None, sync_file_list)
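A minimal usage sketch for the new list_files_recursively_async helper (the directory path is a made-up example; the signature and case-handling behavior are taken from the hunk above):

    import asyncio

    from supervisely.io.fs import list_files_recursively_async

    async def main():
        # With ignore_valid_extensions_case=True both the allowed extensions and the
        # actual file extensions are lowercased, so ".JPG" also matches "photo.jpg".
        files = await list_files_recursively_async(
            "./data",                      # hypothetical local directory
            valid_extensions=[".JPG", ".png"],
            ignore_valid_extensions_case=True,
        )
        print(files)

    asyncio.run(main())
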
@@ -1347,6 +1347,7 @@ class Inference:
 1347 1347 |                 source=images_np,
 1348 1348 |                 settings=settings,
 1349 1349 |             )
      1350 | +           anns = self._exclude_duplicated_predictions(api, anns, settings, dataset_id, ids)
 1350 1351 |             results.extend(self._format_output(anns, slides_data))
 1351 1352 |         return results
 1352 1353 | 
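The single added line routes batch predictions through the new de-duplication step. Whether it actually filters anything is controlled by one inference setting; per the guard added later in this diff (isinstance(iou, float) and 0 < iou <= 1), only a float in (0, 1] enables it:

    settings = {
        # Drop predicted objects whose IoU with a same-class ground-truth box is >= 0.7.
        "existing_objects_iou_thresh": 0.7,
        # 0.0, None, a missing key, or an int such as 1 leaves predictions untouched.
    }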
@@ -1395,6 +1396,10 @@ class Inference:
 1395 1396 |                 )
 1396 1397 |                 self.cache.set_project_meta(output_project_id, output_project_meta)
 1397 1398 | 
      1399 | +           ann = self._exclude_duplicated_predictions(
      1400 | +               api, anns, settings, ds_info.id, [image_id], output_project_meta
      1401 | +           )[0]
      1402 | + 
 1398 1403 |             logger.debug(
 1399 1404 |                 "Uploading annotation...",
 1400 1405 |                 extra={
@@ -1404,6 +1409,10 @@ class Inference:
 1404 1409 |                 },
 1405 1410 |             )
 1406 1411 |             api.annotation.upload_ann(image_id, ann)
      1412 | +       else:
      1413 | +           ann = self._exclude_duplicated_predictions(
      1414 | +               api, anns, settings, image_info.dataset_id, [image_id]
      1415 | +           )[0]
 1407 1416 | 
 1408 1417 |         result = self._format_output(anns, slides_data)[0]
 1409 1418 |         if async_inference_request_uuid is not None and ann is not None:
@@ -1786,6 +1795,15 @@ class Inference:
 1786 1795 |                 batch_results = []
 1787 1796 |                 for i, ann in enumerate(anns):
 1788 1797 |                     image_info: ImageInfo = images_infos_dict[image_ids_batch[i]]
      1798 | +                   ds_info = dataset_infos_dict[image_info.dataset_id]
      1799 | +                   meta = output_project_metas_dict.get(ds_info.project_id, None)
      1800 | +                   iou = settings.get("existing_objects_iou_thresh")
      1801 | +                   if meta is None and isinstance(iou, float) and iou > 0:
      1802 | +                       meta = ProjectMeta.from_json(api.project.get_meta(ds_info.project_id))
      1803 | +                       output_project_metas_dict[ds_info.project_id] = meta
      1804 | +                   ann = self._exclude_duplicated_predictions(
      1805 | +                       api, [ann], settings, ds_info.id, [image_info.id], meta
      1806 | +                   )[0]
 1789 1807 |                     batch_results.append(
 1790 1808 |                         {
 1791 1809 |                             "annotation": ann.to_json(),
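In this hunk the ProjectMeta for each project is fetched at most once and reused from output_project_metas_dict across the batch, so the extra API traffic scales with the number of projects rather than the number of images. The caching pattern in isolation (dictionary and function names here are illustrative, not part of the library):

    project_meta_cache = {}

    def get_project_meta_cached(api, project_id):
        # One api.project.get_meta round-trip per project instead of one per image.
        meta = project_meta_cache.get(project_id)
        if meta is None:
            meta = ProjectMeta.from_json(api.project.get_meta(project_id))
            project_meta_cache[project_id] = meta
        return meta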
@@ -2086,6 +2104,19 @@ class Inference:
 2086 2104 |                             source=images_nps,
 2087 2105 |                             settings=settings,
 2088 2106 |                         )
      2107 | +                       iou = settings.get("existing_objects_iou_thresh")
      2108 | +                       if output_project_meta is None and isinstance(iou, float) and iou > 0:
      2109 | +                           output_project_meta = ProjectMeta.from_json(
      2110 | +                               api.project.get_meta(project_info.id)
      2111 | +                           )
      2112 | +                       anns = self._exclude_duplicated_predictions(
      2113 | +                           api,
      2114 | +                           anns,
      2115 | +                           settings,
      2116 | +                           dataset_info.id,
      2117 | +                           [ii.id for ii in images_infos_batch],
      2118 | +                           output_project_meta,
      2119 | +                       )
 2089 2120 |                         batch_results = []
 2090 2121 |                         for i, ann in enumerate(anns):
 2091 2122 |                             batch_results.append(
@@ -2935,7 +2966,9 @@ class Inference:
 2935 2966 |         parser = argparse.ArgumentParser(description="Run Inference Serving")
 2936 2967 | 
 2937 2968 |         # Positional args
 2938      | -       parser.add_argument(
      2969 | +       parser.add_argument(
      2970 | +           "mode", nargs="?", type=str, help="Mode of operation: 'deploy' or 'predict'"
      2971 | +       )
 2939 2972 |         parser.add_argument("input", nargs="?", type=str, help="Local path to input data")
 2940 2973 | 
 2941 2974 |         # Deploy args
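The reformatted parser call keeps "mode" as an optional positional argument. A minimal reproduction of the behavior this gives the CLI: with nargs="?", both positionals may be omitted and default to None:

    import argparse

    parser = argparse.ArgumentParser(description="Run Inference Serving")
    parser.add_argument("mode", nargs="?", type=str, help="Mode of operation: 'deploy' or 'predict'")
    parser.add_argument("input", nargs="?", type=str, help="Local path to input data")

    print(parser.parse_args([]))                       # Namespace(mode=None, input=None)
    print(parser.parse_args(["predict", "./images"]))  # Namespace(mode='predict', input='./images')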
@@ -3459,6 +3492,127 @@ class Inference:
 3459 3492 |                 f"Checkpoint {checkpoint_url} not found in Team Files. Cannot set workflow input"
 3460 3493 |             )
 3461 3494 | 
      3495 | +     def _exclude_duplicated_predictions(
      3496 | +         self,
      3497 | +         api: Api,
      3498 | +         pred_anns: List[Annotation],
      3499 | +         settings: dict,
      3500 | +         dataset_id: int,
      3501 | +         gt_image_ids: List[int],
      3502 | +         meta: Optional[ProjectMeta] = None,
      3503 | +     ):
      3504 | +         """
      3505 | +         Filter out predictions that significantly overlap with ground truth (GT) objects.
      3506 | + 
      3507 | +         This is a wrapper around the `_filter_duplicated_predictions_from_ann` method that does the following:
      3508 | +         - Checks inference settings for the IoU threshold (`existing_objects_iou_thresh`)
      3509 | +         - Gets ProjectMeta object if not provided
      3510 | +         - Downloads GT annotations for the specified image IDs
      3511 | +         - Filters out predictions that have an IoU greater than or equal to the specified threshold with any GT object
      3512 | + 
      3513 | +         :param api: Supervisely API object
      3514 | +         :type api: Api
      3515 | +         :param pred_anns: List of Annotation objects containing predictions
      3516 | +         :type pred_anns: List[Annotation]
      3517 | +         :param settings: Inference settings
      3518 | +         :type settings: dict
      3519 | +         :param dataset_id: ID of the dataset containing the images
      3520 | +         :type dataset_id: int
      3521 | +         :param gt_image_ids: List of image IDs to filter predictions. All images should belong to the same dataset
      3522 | +         :type gt_image_ids: List[int]
      3523 | +         :param meta: ProjectMeta object
      3524 | +         :type meta: Optional[ProjectMeta]
      3525 | +         :return: List of Annotation objects containing filtered predictions
      3526 | +         :rtype: List[Annotation]
      3527 | + 
      3528 | +         Notes:
      3529 | +         ------
      3530 | +         - Requires PyTorch and torchvision for IoU calculations
      3531 | +         - This method is useful for identifying new objects that aren't already annotated in the ground truth
      3532 | +         """
      3533 | +         iou = settings.get("existing_objects_iou_thresh")
      3534 | +         if isinstance(iou, float) and 0 < iou <= 1:
      3535 | +             if meta is None:
      3536 | +                 ds = api.dataset.get_info_by_id(dataset_id)
      3537 | +                 meta = ProjectMeta.from_json(api.project.get_meta(ds.project_id))
      3538 | +             gt_anns = api.annotation.download_json_batch(dataset_id, gt_image_ids)
      3539 | +             gt_anns = [Annotation.from_json(ann, meta) for ann in gt_anns]
      3540 | +             for i in range(0, len(pred_anns)):
      3541 | +                 before = len(pred_anns[i].labels)
      3542 | +                 with Timer() as timer:
      3543 | +                     pred_anns[i] = self._filter_duplicated_predictions_from_ann(
      3544 | +                         gt_anns[i], pred_anns[i], iou
      3545 | +                     )
      3546 | +                 after = len(pred_anns[i].labels)
      3547 | +                 logger.debug(
      3548 | +                     f"{[i]}: applied NMS with IoU={iou}. Before: {before}, After: {after}. Time: {timer.get_time():.3f}ms"
      3549 | +                 )
      3550 | +         return pred_anns
      3551 | + 
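Before the second helper is added below, a toy illustration of the suppression rule it implements: for each class, a prediction is kept only if its IoU with every ground-truth box of that class stays below the threshold. This sketch assumes torch and torchvision are installed; boxes are in xyxy order, matching the left/top/right/bottom tensor built from each label's bounding box:

    import numpy as np
    import torch
    from torchvision.ops import box_iou

    gt_boxes = torch.tensor([[0.0, 0.0, 100.0, 100.0]])  # one GT box (x1, y1, x2, y2)
    pred_boxes = torch.tensor(
        [
            [0.0, 0.0, 90.0, 90.0],        # IoU 0.81 with the GT box -> removed
            [200.0, 200.0, 260.0, 260.0],  # no overlap -> kept
        ]
    )

    iou_matrix = box_iou(pred_boxes, gt_boxes).cpu().numpy()  # shape (num_pred, num_gt)
    keep_indices = np.where(np.all(iou_matrix < 0.7, axis=1))[0]
    print(keep_indices)  # [1]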
      
      3552 | +     def _filter_duplicated_predictions_from_ann(
      3553 | +         self, gt_ann: Annotation, pred_ann: Annotation, iou_threshold: float
      3554 | +     ) -> Annotation:
      3555 | +         """
      3556 | +         Filter out predictions that significantly overlap with ground truth annotations.
      3557 | + 
      3558 | +         This function compares each prediction with ground truth annotations of the same class
      3559 | +         and removes predictions that have an IoU (Intersection over Union) greater than or equal
      3560 | +         to the specified threshold with any ground truth annotation. This is useful for identifying
      3561 | +         new objects that aren't already annotated in the ground truth.
      3562 | + 
      3563 | +         :param gt_ann: Annotation object containing ground truth labels
      3564 | +         :type gt_ann: Annotation
      3565 | +         :param pred_ann: Annotation object containing prediction labels to be filtered
      3566 | +         :type pred_ann: Annotation
      3567 | +         :param iou_threshold:   IoU threshold (0.0-1.0). Predictions with IoU >= threshold with any
      3568 | +                                 ground truth box of the same class will be removed
      3569 | +         :type iou_threshold: float
      3570 | +         :return: A new annotation object containing only predictions that don't significantly
      3571 | +                  overlap with ground truth annotations
      3572 | +         :rtype: Annotation
      3573 | + 
      3574 | + 
      3575 | +         Notes:
      3576 | +         ------
      3577 | +         - Predictions with classes not present in ground truth will be kept
      3578 | +         - Requires PyTorch and torchvision for IoU calculations
      3579 | +         """
      3580 | + 
      3581 | +         try:
      3582 | +             import torch
      3583 | +             from torchvision.ops import box_iou
      3584 | + 
      3585 | +         except ImportError:
      3586 | +             raise ImportError("Please install PyTorch and torchvision to use this feature.")
      3587 | + 
      3588 | +         def _to_tensor(geom):
      3589 | +             return torch.tensor([geom.left, geom.top, geom.right, geom.bottom]).float()
      3590 | + 
      3591 | +         new_labels = []
      3592 | +         pred_cls_bboxes = defaultdict(list)
      3593 | +         for label in pred_ann.labels:
      3594 | +             pred_cls_bboxes[label.obj_class.name].append(label)
      3595 | + 
      3596 | +         gt_cls_bboxes = defaultdict(list)
      3597 | +         for label in gt_ann.labels:
      3598 | +             if label.obj_class.name not in pred_cls_bboxes:
      3599 | +                 continue
      3600 | +             gt_cls_bboxes[label.obj_class.name].append(label)
      3601 | + 
      3602 | +         for name, pred in pred_cls_bboxes.items():
      3603 | +             gt = gt_cls_bboxes[name]
      3604 | +             if len(gt) == 0:
      3605 | +                 new_labels.extend(pred)
      3606 | +                 continue
      3607 | +             pred_bboxes = torch.stack([_to_tensor(l.geometry.to_bbox()) for l in pred]).float()
      3608 | +             gt_bboxes = torch.stack([_to_tensor(l.geometry.to_bbox()) for l in gt]).float()
      3609 | +             iou_matrix = box_iou(pred_bboxes, gt_bboxes)
      3610 | +             iou_matrix = iou_matrix.cpu().numpy()
      3611 | +             keep_indices = np.where(np.all(iou_matrix < iou_threshold, axis=1))[0]
      3612 | +             new_labels.extend([pred[i] for i in keep_indices])
      3613 | + 
      3614 | +         return pred_ann.clone(labels=new_labels)
      3615 | + 
 3462 3616 | 
 3463 3617 | def _get_log_extra_for_inference_request(inference_request_uuid, inference_request: dict):
 3464 3618 |     log_extra = {